import logging
import re
import pandas as pd
from unidecode import unidecode
from comvest.utilities.io import files, read_from_db, write_result, read_result
from comvest.utilities.logging import progresslog, resultlog
pd.options.mode.chained_assignment = None # default='warn'
def validacao_curso(df, col, date):
cursos = df_cursos.loc[df_cursos['ano_vest'] == date]['cod_curso'].tolist()
# Codes not present in the course list will be remapped to missing
df[col].fillna(-1, inplace=True)
df[col] = df[col].map(lambda cod: int(cod) if int(cod) in cursos else '')
df[col] = pd.to_numeric(df[col], errors='coerce').astype('Int64')
return df
# Function to concatenate day, month and year
def data_nasc(row, df):
if ('DATA_NASC' in df.columns) or ('DAT_NASC' in df.columns) or ('DTNASC' in df.columns):
if 'DATA_NASC' in df.columns:
data = row['DATA_NASC']
elif 'DAT_NASC' in df.columns:
data = row['DAT_NASC']
else:
data = row['DTNASC']
data = str(data).split('.')[0]
if data == 'nan': return ('')
if len(data) <= 6:
data = data[:-2] + '19' + data[-2:]
ano = data[-4:]
mes = data[-6:-4]
dia = data.replace(data[-6:], '')
if len(data) < 8:
dia = '0' + dia
res = dia + mes + ano
elif all(x in df.columns for x in ('DIA','MES','ANO')):
dia = str(row['DIA']).zfill(2)
mes = str(row['MES']).zfill(2)
ano = str(row['ANO'])
if len(ano) < 4:
ano = '19' + ano
res = "{0}{1}{2}".format(dia, mes, ano)
else:
# Document has no birth date column(s)
res = ''
return res
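# Illustrative walk-through (added note, not part of the original pipeline): for a row
# where DATA_NASC holds '150383' (ddmmyy), the branch above expands it to '15031983',
# giving ano='1983', mes='03', dia='15', so data_nasc returns '15031983'.
# A missing value ('nan') yields the empty string instead.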
def tratar_inscricao(df):
# Check the application number (Número de Inscrição) across the different column-name variants and strip the '\.0' suffix from the string
if 'INSC' in df.columns:
df['INSC'] = df['INSC'].astype("string").replace('\.0', '', regex=True)
elif 'INSC_CAND' in df.columns:
df['INSC'] = df['INSC_CAND'].astype("string").replace('\.0', '', regex=True)
elif 'INSC_cand' in df.columns:
df['INSC'] = df['INSC_cand'].astype("string").replace('\.0', '', regex=True)
elif 'INSCRICAO' in df.columns:
df['INSC'] = df['INSCRICAO'].astype("string").replace('\.0', '', regex=True)
df['INSC'] = pd.to_numeric(df['INSC'], errors='coerce', downcast='integer').astype('Int64')
return df
def tratar_CPF(df):
# Check whether the CPF column exists
if 'CPF' in df.columns:
df['CPF'] = df['CPF'].map(lambda cpf: str(cpf).zfill(11))
else:
df.insert(loc=1, column='CPF', value='-')
return df
def tratar_doc(df):
if any(col in df.columns for col in {'RG','DOC3'}):
df.rename({'RG':'DOC','DOC3':'DOC'}, axis=1, inplace=True)
df['DOC'] = df['DOC'].str.replace(' ','')
return df
def tratar_nome(df):
# If the name is given by NOME_CAND or NOMEOFIC, rename the column to NOME
if 'NOME_CAND' in df.columns:
df.rename({'NOME_CAND': 'NOME'}, axis=1, inplace=True)
elif 'NOMEOFIC' in df.columns:
df.rename({'NOMEOFIC': 'NOME'}, axis=1, inplace=True)
elif 'NOME_cand' in df.columns:
df.rename({'NOME_cand': 'NOME'}, axis=1, inplace=True)
return df
def tratar_nome_pai(df):
if 'PAI' in df.columns:
df.rename({'PAI': 'NOME_PAI'}, axis=1, inplace=True)
return df
def tratar_nome_mae(df):
if 'MAE' in df.columns:
df.rename({'MAE': 'NOME_MAE'}, axis=1, inplace=True)
return df
def tratar_nacionalidade(df):
for col in df.columns:
if col in {'NACIO','NACION','NACIONALID','NACIONALIDADE'}:
df.rename({col: 'NACIONALIDADE'}, axis=1, inplace=True)
df['NACIONALIDADE'] = pd.to_numeric(df['NACIONALIDADE'], errors='coerce', downcast='integer').astype('Int64')
df['NACIONALIDADE'].replace(0, pd.NA, inplace=True)
return df
return df
def tratar_mun_nasc(df):
for col in df.columns:
if col in {'MUNICIPIO_NASC','MU_NASC','MUNIC_NASC','CIDNASC','CIDNAS'}:
df.rename({col: 'MUN_NASC'}, axis=1, inplace=True)
df['MUN_NASC'] = df['MUN_NASC'].map(lambda mun: unidecode(str(mun)).upper() if str(mun) != '-' else '')
return df
return df
def tratar_uf_nasc(df):
for col in df.columns:
if col in {'UFNASC','EST_NASC','UFNAS'}:
df.rename({col: 'UF_NASC'}, axis=1, inplace=True)
df['UF_NASC'] = df['UF_NASC'].map(lambda uf: unidecode(str(uf)).upper() if str(uf) != '-' else '')
return df
return df
def tratar_cep(df):
for col in df.columns:
if col in {'CEP','CEPEND','CEP_END','CEP3'}:
df.rename({col: 'CEP_RESID'}, axis=1, inplace=True)
fill = df['CEP_RESID'].map(lambda cep: len(re.sub('\D','',str(cep)))).max()
fill = 8 if fill > 8 else fill
df['CEP_RESID'] = df['CEP_RESID'].map(lambda cep: re.sub('\D','',str(cep)).zfill(fill))
return df
if 'CEP_RESID' not in df.columns:
df['CEP_RESID'] = ''
return df
def tratar_mun_resid(df):
for col in df.columns:
if col in {'MUEND','MUNIC_END','MUNICIPIO','CID','CIDEND'}:
df.rename({col: 'MUN_RESID'}, axis=1, inplace=True)
df['MUN_RESID'] = df['MUN_RESID'].map(lambda mun: unidecode(str(mun)).upper())
return df
return df
def tratar_uf_resid(df):
# If the residence UF (state) is given by UFEND, UF_END or ESTADO, rename the column to UF_RESID
if 'UFEND' in df.columns:
df.rename({'UFEND': 'UF_RESID'}, axis=1, inplace=True)
elif 'UF_END' in df.columns:
df.rename({'UF_END': 'UF_RESID'}, axis=1, inplace=True)
elif 'ESTADO' in df.columns:
df.rename({'ESTADO': 'UF_RESID'}, axis=1, inplace=True)
elif 'EST' in df.columns:
df.rename({'EST': 'UF_RESID'}, axis=1, inplace=True)
return df
def tratar_opvest(df,date,path):
# Check the course-choice columns of the entrance exam (vestibular)
for col in df.columns:
if any(opc in col for opc in {'OPCAO1','OP1','OPCAO1OR'}):
df.rename({col: 'OPCAO1'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO1', date)
if any(opc in col for opc in {'OPCAO2','OP2','OPCAO2OR'}):
df.rename({col: 'OPCAO2'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO2', date)
if any(opc in col for opc in {'OPCAO3','OP3'}):
df.rename({col: 'OPCAO3'}, axis=1, inplace=True)
df = validacao_curso(df, 'OPCAO3', date)
# Option 1 = 22 (Music) - must be remapped to the code of the corresponding emphasis, taken from the 'perfil' sheet
if (date == 2001) or (date == 2002) or (date == 2003):
emphasis = pd.read_excel(path, sheet_name='perfil', usecols=['insc_cand','opcao1'], dtype=str)
emphasis['insc_cand'] = pd.to_numeric(emphasis['insc_cand'], errors='coerce', downcast='integer').astype('Int64')
df.drop(columns='OPCAO1', inplace=True)
df = df.merge(emphasis, how='inner', left_on=['INSC'], right_on=['insc_cand'])
df.rename({'opcao1':'OPCAO1'}, axis=1, inplace=True)
df['OPCAO1'] = pd.to_numeric(df['OPCAO1'], errors='coerce', downcast='integer').astype('Int64')
return df
def tratar_escola(df):
# Check the column with the candidate's high school
for col in df.columns:
if col in {'NOMEESC','NOME_ESC','ESCOLAEM','ESCOLA','ESC2'}:
df.rename({col: 'ESCOLA_EM'}, axis=1, inplace=True)
df['ESCOLA_EM'] = df['ESCOLA_EM'].map(lambda esc: unidecode(str(esc)).upper() if str(esc) != '-' else '')
return df
return df
def tratar_mun_escola(df):
# Check the column with the municipality of the candidate's high school
for col in df.columns:
if col in {'MUESC','MUN_ESC','MUN_ESCOLA','MUNESC','MUNICIPIO_ESCOLA','CIDESC'}:
df.rename({col: 'MUN_ESC_EM'}, axis=1, inplace=True)
df['MUN_ESC_EM'] = df['MUN_ESC_EM'].map(lambda mun: unidecode(str(mun)).upper() if str(mun) != '-' else '')
return df
return df
def tratar_uf_escola(df):
# Check the column with the UF (state) where the candidate's high school is located
for col in df.columns:
if col in {'UFESC','UF_ESC','ESTADO_ESC','ESTESC','UF_ESCOLA','ESTADO_ESCOLA','EST_ESCOLA'}:
df.rename({col: 'UF_ESCOLA_EM'}, axis=1, inplace=True)
return df
return df
def tratar_tipo_escola(df):
# Check the column with the type of the candidate's high school
for col in df.columns:
if col in {'TIPOESC','TIPO_ESC','TIPO_ESCOL','TIPO_ESCOLA'}:
df['TIPO_ESCOLA_EM'] = pd.to_numeric(df[col], errors='coerce', downcast='integer').astype('Int64')
return df
return df
def validar_ano(val, date):
if pd.isna(val):
return pd.NA
else:
return val if date-80 <= val <= date else pd.NA
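# Illustrative examples (added note; 'date' is assumed to be the exam year): with date=2020,
# validar_ano(1985, 2020) returns 1985 since 1940 <= 1985 <= 2020, while
# validar_ano(1930, 2020) and validar_ano(pd.NA, 2020) both return pd.NA.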
def tratar_ano_conclu(df, date):
# Check the column with the candidate's high school graduation year
for col in df.columns:
if col in {'ANO_CONCLU','ANOCONC','ANO_CONC','ANO_CONCLUSAO'}:
df.rename({col: 'ANO_CONCLU_EM'}, axis=1, inplace=True)
df['ANO_CONCLU_EM'] = df['ANO_CONCLU_EM'].fillna(value='')
df['ANO_CONCLU_EM'] = df['ANO_CONCLU_EM'].map(lambda ano: '19'+str(ano) if len(str(ano)) == 2 else ano)
df['ANO_CONCLU_EM'] = pd.to_numeric(df['ANO_CONCLU_EM'], errors='coerce', downcast='integer').astype('Int64')
# use date - 80 <= val
df['ANO_CONCLU_EM'] = df['ANO_CONCLU_EM'].apply(validar_ano, args=(date,))
return df
return df
def tratar_dados(df,date,path,ingresso=1):
# Merge the birth date into a single column
df['DATA_NASC'] = df.apply(data_nasc, axis=1, args=(df,))
df['ANO_NASC'] = df['DATA_NASC'].map(lambda data: data[-4:] if data != '' else data)
df['ANO_NASC'] = pd.to_numeric(df['ANO_NASC'], errors='coerce', downcast='integer').astype('Int64')
df['ANO_NASC'] = df['ANO_NASC'].apply(validar_ano, args=(date,))
# Insert the year of the dataset into the final dataframe
df.loc[:, 'ANO'] = date
# Insert the Comvest admission type
df.loc[:, 'TIPO_INGRESSO_COMVEST'] = ingresso
df = tratar_inscricao(df)
df = tratar_CPF(df)
df = tratar_doc(df)
df = tratar_nome(df)
df = tratar_nome_mae(df)
df = tratar_nome_pai(df)
df = tratar_opvest(df,date,path)
df = tratar_nacionalidade(df)
df = tratar_mun_nasc(df)
df = tratar_uf_nasc(df)
df = tratar_cep(df)
df = tratar_mun_resid(df)
df = tratar_uf_resid(df)
df = tratar_escola(df)
df = tratar_mun_escola(df)
df = tratar_uf_escola(df)
df = tratar_tipo_escola(df)
df = tratar_ano_conclu(df, date)
# Rearrange the columns and rename them appropriately
df = df.reindex(columns=['ANO','TIPO_INGRESSO_COMVEST','NOME','CPF','DOC','DATA_NASC','ANO_NASC','NOME_PAI','NOME_MAE','INSC','OPCAO1','OPCAO2','OPCAO3','NACIONALIDADE','PAIS_NASC','MUN_NASC','UF_NASC','CEP_RESID','MUN_RESID','UF_RESID','ESCOLA_EM','MUN_ESC_EM','UF_ESCOLA_EM','TIPO_ESCOLA_EM','ANO_CONCLU_EM'])
df.columns = ['ano_vest','tipo_ingresso_comvest','nome_c','cpf','doc_c','dta_nasc_c','ano_nasc_c','nome_pai_c','nome_mae_c','insc_vest','opc1','opc2','opc3','nacionalidade_c','pais_nac_c','mun_nasc_c','uf_nasc_c','cep_resid_c','mun_resid_c','uf_resid','esc_em_c','mun_esc_em_c','uf_esc_em','nat_esc_em_c','ano_conclu_em_c']
return df
# Read the course list for later validation
try:
df_cursos = read_result('cursos.csv')
except:
logging.warning('Couldn\'t find "cursos.csv"')
def extraction():
dados_comvest = []
for path, date in files.items():
df = read_from_db(path, sheet_name='dados', dtype=str)
progresslog('dados', date)
df = tratar_dados(df,date,path)
if date >= 2019:
vi_dados = read_from_db(path, sheet_name='vi_dados')
vo_dados = read_from_db(path, sheet_name='vo_dados')
vi_dados = tratar_dados(vi_dados, date, path, ingresso=2) # 2 - Vestibular Indigena
vo_dados = tratar_dados(vo_dados, date, path, ingresso=3) # 3 - Vagas Olimpicas
df = pd.concat([df, vi_dados, vo_dados])
if date != 2021:
ve_dados = read_from_db(path, sheet_name='ve_dados')
ve_dados = tratar_dados(ve_dados, date, path, ingresso=4) # 4 - ENEM-Unicamp
df = pd.concat([df, ve_dados])
from sklearn.datasets import load_breast_cancer, fetch_california_housing
import pandas as pd
import numpy as np
import pickle
import os
import collections
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
def handle_categorical_feat(X_df):
''' Move the categorical (object-dtype) features to the end of the column order '''
original_columns = []
one_hot_columns = []
for col_name, dtype in zip(X_df.dtypes.index, X_df.dtypes):
if dtype == object:
one_hot_columns.append(col_name)
else:
original_columns.append(col_name)
X_df = X_df[original_columns + one_hot_columns]
return X_df, one_hot_columns
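# Usage sketch (added note; column names borrowed from the adult data loaded below):
# for a frame with columns ['Age', 'WorkClass', 'HoursPerWeek'] where only 'WorkClass'
# has object dtype, handle_categorical_feat would reorder the columns to
# ['Age', 'HoursPerWeek', 'WorkClass'] and return one_hot_columns == ['WorkClass'].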
def load_breast_data():
breast = load_breast_cancer()
feature_names = list(breast.feature_names)
X, y = pd.DataFrame(breast.data, columns=feature_names), pd.Series(breast.target)
dataset = {
'problem': 'classification',
'full': {
'X': X,
'y': y,
},
'd_name': 'breast',
'search_lam': np.logspace(-1, 2.5, 15),
}
return dataset
def load_adult_data():
# https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data
df = pd.read_csv("./datasets/adult.data", header=None)
df.columns = [
"Age", "WorkClass", "fnlwgt", "Education", "EducationNum",
"MaritalStatus", "Occupation", "Relationship", "Race", "Gender",
"CapitalGain", "CapitalLoss", "HoursPerWeek", "NativeCountry", "Income"
]
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols].copy()
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy()
# Make it as 0 or 1
y_df.loc[y_df == ' >50K'] = 1.
y_df.loc[y_df == ' <=50K'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'adult',
'search_lam': np.logspace(-2, 2, 15),
'n_splines': 50,
'onehot_columns': onehot_columns,
}
return dataset
def load_credit_data():
# https://www.kaggle.com/mlg-ulb/creditcardfraud
df = pd.read_csv(r'./datasets/creditcard.csv')
train_cols = df.columns[0:-1]
label = df.columns[-1]
X_df = df[train_cols]
y_df = df[label]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'credit',
'search_lam': np.logspace(-0.5, 2.5, 8),
}
return dataset
def load_churn_data():
# https://www.kaggle.com/blastchar/telco-customer-churn/downloads/WA_Fn-UseC_-Telco-Customer-Churn.csv/1
df = pd.read_csv(r'./datasets/WA_Fn-UseC_-Telco-Customer-Churn.csv')
train_cols = df.columns[1:-1] # First column is an ID
label = df.columns[-1]
X_df = df[train_cols].copy()
# Handle special case of TotalCharges wrongly assigned as object dtype
X_df.loc[X_df['TotalCharges'] == ' ', 'TotalCharges'] = 0.
X_df.loc[:, 'TotalCharges'] = pd.to_numeric(X_df['TotalCharges'])
# X_df = pd.get_dummies(X_df)
X_df, onehot_columns = handle_categorical_feat(X_df)
y_df = df[label].copy() # 'Yes, No'
# Make it as 0 or 1
y_df[y_df == 'Yes'] = 1.
y_df[y_df == 'No'] = 0.
y_df = y_df.astype(int)
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'd_name': 'churn',
'search_lam': np.logspace(0, 3, 15),
'onehot_columns': onehot_columns,
}
return dataset
def load_pneumonia_data(folder='/media/intdisk/medical/RaniHasPneumonia/'):
featurename_file = os.path.join(folder, 'featureNames.txt')
col_names = pd.read_csv(featurename_file, delimiter='\t', header=None, index_col=0).iloc[:, 0].values
def read_data(file_path='pneumonia/RaniHasPneumonia/medis9847c.data'):
df = pd.read_csv(file_path, delimiter='\t', header=None)
df = df.iloc[:, :-1] # Remove the last empty weird column
df.columns = col_names
return df
df_train = read_data(os.path.join(folder, 'medis9847c.data'))
df_test = read_data(os.path.join(folder, 'medis9847c.test'))
df = pd.concat([df_train, df_test], axis=0).reset_index(drop=True)
X_df = df.iloc[:, :-1]
y_df = df.iloc[:, -1]
dataset = {
'problem': 'classification',
'full': {
'X': X_df,
'y': y_df,
},
'test_size': 4352 / 14199,
'd_name': 'pneumonia',
'search_lam': np.logspace(0, 3, 15),
}
return dataset
def load_heart_data():
# https://www.kaggle.com/sonumj/heart-disease-dataset-from-uci
df = pd.read_csv('./datasets/HeartDisease.csv')
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
import seaborn as sns
sns.set_style("whitegrid")
import sys
import os
from pathlib import Path
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold,RepeatedKFold, learning_curve
from xgboost.sklearn import XGBClassifier
from utils import data_handler
from utils import bayesiantests as bt
root_dir = str(Path(os.getcwd())) #.parent
to_dir = root_dir + '/results/'
import warnings
warnings.filterwarnings('ignore')
#res= None
##------------------------------ font, fig size setup------------------------------
plt.rc('font', family='serif')
def set_fig_fonts(SMALL_SIZE=22, MEDIUM_SIZE=24,BIGGER_SIZE = 26):
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
set_fig_fonts()
##------------------------------functions----------------------------------------
def save_fig(fig, title):
to_path = data_handler.format_title(to_dir,title,'.png')
fig.savefig(to_path ,dpi=1000,bbox_inches="tight",pad_inches=0)#, bbox_inches='tight', pad_inches=10
print("Successfully saved to: ",to_path)
return to_path
def plot_correlation_matrix(X,title, col_list, toSaveFig=True):
set_fig_fonts(12,14,16)
# standardization
scaler = StandardScaler()
df_transf = scaler.fit_transform(X)
df = pd.DataFrame(df_transf,columns = col_list)
fig = plt.figure()
ax1 = fig.add_subplot(111)
cmap = cm.get_cmap('coolwarm', 30)
#cax = ax1.pcolor(df.corr(), cmap=cmap, vmin=-1, vmax=1)
mat = df.corr()
flip_mat = mat.iloc[::-1]
cax = ax1.imshow(flip_mat , interpolation="nearest", cmap=cmap,vmin=-1, vmax=1)
ax1.grid(True)
#plt.suptitle('Features\' Correlation', y =0)
labels=df.columns.tolist()
x_labels = labels.copy()
labels.reverse()
#ax1.xaxis.set_ticks_position('top')
ax1.set_xticks(np.arange(len(labels)))#np.arange(len(labels))
ax1.set_yticks(np.arange(len(labels)))
# want a more natural, table-like display
#ax1.xaxis.tick_top()
ax1.set_xticklabels(x_labels, rotation = -45, ha="left") #, , rotation = 45,horizontalalignment="left"
ax1.set_yticklabels(labels, ha="right")
#plt.xticks(rotation=90)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
fig.colorbar(cax, boundaries=np.linspace(-1,1,21),ticks=np.linspace(-1,1,5))
plt.show()
if(toSaveFig):
save_fig(fig,title+'_confusion_matrix')
set_fig_fonts()
def plot_ROC_curve(pipe, tuned_parameters, title = 'roc_curve', save_csv = True,task=0):
# cross-validation setup
Ntrials = 1
outter_nsplit = 10
inner_nsplit = 10
# Results store
Y_true = pd.Series(name='Y_true')
pred_results = pd.Series(name='pred_prob')
# load data
assert (task ==0 or task ==2),'Error: invalid task spec!'
X_df, Y_df = data_handler.load_XY(task)
X = X_df.values
Y = Y_df.values
for i in range(Ntrials):
train_index = []
test_index = []
outer_cv = StratifiedKFold(n_splits=outter_nsplit, shuffle=True, random_state=i)
for train_ind,test_ind in outer_cv.split(X,Y):
train_index.append(train_ind.tolist())
test_index.append(test_ind.tolist())
for j in range(outter_nsplit):#outter_nsplit
print("progress >> ",j,' / ',outter_nsplit)
X_train = X[train_index[j]]
Y_train = Y[train_index[j]]
X_test = X[test_index[j]]
Y_test = Y[test_index[j]]
inner_cv = StratifiedKFold(n_splits=inner_nsplit, shuffle=False, random_state=j)
clf = GridSearchCV(pipe,tuned_parameters, cv=inner_cv,scoring='roc_auc')
clf.fit(X_train, Y_train)
pred = pd.Series(clf.predict_proba(X_test)[:,1])
pred_results = pd.concat([pred_results, pred], axis=0, ignore_index=True)
from hls4ml.model.hls_model import HLSModel
from hls4ml.model.hls_layers import IntegerPrecisionType, FixedPrecisionType
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sb
from hls4ml.model.hls_model import HLSModel
try:
from tensorflow import keras
import qkeras
__tf_profiling_enabled__ = True
except ImportError:
__tf_profiling_enabled__ = False
try:
import torch
__torch_profiling_enabled__ = True
except ImportError:
__torch_profiling_enabled__ = False
def array_to_summary(x, fmt='boxplot'):
if fmt == 'boxplot':
y = {'med' : np.median(x),
'q1' : np.percentile(x, 25),
'q3' : np.percentile(x, 75),
'whislo' : min(x),
'whishi' : max(x)
}
elif fmt == 'histogram':
# Power of 2 bins covering data range
high = np.ceil(np.log2(max(x))) + 1
low = np.floor(np.log2(min(x))) - 1
bits = np.arange(low, high, 1)
bins = 2 ** bits
h, b = np.histogram(x, bins=bins)
h = h * 1. / float(sum(h)) # normalize
y = {'h' : h,
'b' : np.log2(b)}
return y
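# Illustrative example (added note): for x = np.array([1, 2, 3, 4, 100]),
# array_to_summary(x, fmt='boxplot') returns
# {'med': 3.0, 'q1': 2.0, 'q3': 4.0, 'whislo': 1, 'whishi': 100},
# i.e. the per-weight summary dict that the 'summary' branch of boxplot() feeds to Axes.bxp.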
def boxplot(data, fmt='longform'):
if fmt == 'longform':
f = plt.figure() #figsize=(3, 3))
hue = 'layer' if 'layer' in data.keys() else None
vp = sb.boxplot(x='x', y='weight', hue=hue, data=data[data['x'] > 0], showfliers=False)
vp.set_yticklabels(vp.get_yticklabels(), rotation=45, ha='right')
if hue is not None:
vp.get_legend().remove()
vp.set_xscale('log', base=2)
return f
elif fmt == 'summary':
from matplotlib.patches import Rectangle
medianprops = dict(linestyle='-', color='k')
f, ax = plt.subplots(1, 1)
data.reverse()
colors = sb.color_palette("Blues", len(data))
bp = ax.bxp(data, showfliers=False, vert=False, medianprops=medianprops)
# add colored boxes
for line, color in zip(bp['boxes'], colors):
x = line.get_xdata()
xl, xh = min(x), max(x)
y = line.get_ydata()
yl, yh = min(y), max(y)
rect = Rectangle((xl, yl), (xh-xl), (yh-yl), fill=True, color=color)
ax.add_patch(rect)
ax.set_yticklabels([d['weight'] for d in data])
ax.set_xscale('log', base=2)
plt.xlabel('x')
return f
else:
return None
def histogram(data, fmt='longform'):
f = plt.figure()
from matplotlib.ticker import MaxNLocator
n = len(data) if fmt == 'summary' else len(data['weight'].unique())
colors = sb.color_palette("husl", n)
if fmt == 'longform':
for i, weight in enumerate(data['weight'].unique()):
y = array_to_summary(data[data['weight'] == weight]['x'], fmt='histogram')
plt.bar(y['b'][:-1], y['h'], width=1, fill=False, label=weight, edgecolor=colors[i])
elif fmt == 'summary':
for i, weight in enumerate(data):
plt.bar(weight['b'][:-1], weight['h'], width=1, fill=False, label=weight['weight'], edgecolor=colors[i])
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlabel('log2(x)')
plt.ylabel('frequency')
plt.legend()
return f
plots = {'boxplot' : boxplot,
'histogram' : histogram}
def types_boxplot(data, fmt='longform'):
from matplotlib.patches import PathPatch
from matplotlib.patches import Rectangle
ax = plt.gca()
f = plt.gcf()
# Scale the data
data['low'] = 2.**data['low']
data['high'] = 2.**data['high']
# Plot the custom precisions
ticks = np.array([tick.get_text() for tick in plt.yticks()[1]])
# Get the coordinates of the boxes to place the markers
if fmt == 'longform':
# seaborn adjusts the box positions slightly in groups
boxes = [c.get_extents().inverse_transformed(ax.transData) for c in ax.get_children() if isinstance(c, PathPatch)]
ys = [(box.y0 + box.y1) / 2 for box in boxes]
ys = [(y, y) for y in ys]
elif fmt == 'summary':
ys = [(y, y) for y in plt.yticks()[0]]
for irow, row in data[data['layer'] != 'model'].iterrows():
if row['layer'] in ticks:
iy = np.argwhere(ticks == row['layer'])[0][0] # Determine which layer in the plot
rectangle = Rectangle((row['low'], ys[iy][0]-0.4), row['high']-row['low'], 0.8, fill=True, color='grey', alpha=0.2)
ax.add_patch(rectangle)
def types_histogram(data, fmt='longform'):
ax = plt.gca()
layers = np.array(ax.get_legend_handles_labels()[1])
colors = sb.color_palette("husl", len(layers))
ylim = ax.get_ylim()
for irow, row in data[data['layer'] != 'model'].iterrows():
if row['layer'] in layers:
col = colors[np.argwhere(layers == row['layer'])[0][0]]
plt.plot((row['low'], row['low']), ylim, '--', color=col)
plt.plot((row['high'], row['high']), ylim, '--', color=col)
types_plots = {'boxplot' : types_boxplot,
'histogram' : types_histogram}
def ap_fixed_WIF(dtype):
from hls4ml.templates.vivado_template import VivadoBackend
dtype = VivadoBackend.convert_precision_string(None, dtype)
W, I, F = dtype.width, dtype.integer, dtype.fractional
return W, I, F
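# Illustrative expectation (added note, assuming standard Vivado ap_fixed semantics where
# the fractional width is the total width minus the integer width): for the precision
# string 'ap_fixed<16,6>' this helper should return W=16, I=6, F=10.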
def types_hlsmodel(model):
suffix = ['w', 'b']
data = {'layer' : [], 'low' : [], 'high' : []}
# Plot the default precision
default_precision = model.config.model_precision['default']
# assumes ap_fixed
W, I, F = ap_fixed_WIF(default_precision)
data['layer'].append('model')
data['low'].append(-F)
data['high'].append(I-1)
for layer in model.get_layers():
for iw, weight in enumerate(layer.get_weights()):
wname = '{}/{}'.format(layer.name, suffix[iw])
T = weight.type
if T.name != 'model':
W, I, F = ap_fixed_WIF(T.precision)
data['layer'].append(wname)
data['low'].append(-F)
data['high'].append(I-1)
data = pandas.DataFrame(data)
from numpy import array, float16
from pytorch_forecasting.data import (
TimeSeriesDataSet,
)
from datetime import timedelta
import pytorch_lightning as pl
from pytorch_lightning.callbacks import (
EarlyStopping,
)
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_forecasting.models import RecurrentNetwork
from pytorch_forecasting.metrics import RMSE
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import requests
import json
import datetime
import ast
import time
from io import StringIO
import numpy as np
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 2000)
#Note: Usage Example at very bottom.
# ******************************************************************************************************************
# ********************************************** SMARD+MONTEL DATAS ************************************************
# ******************************************************************************************************************
def get_data_for_prediction(requestedTimeStamp,
numberOfDaysInPast=60):
"""
:param requestedTimeStamp: Date and Time of the request. Should be a pandas datetime object
:param numberOfDaysInPast: Int value of days in the past needed for prediction
:return: Full dataset of required information
Output Columns: (['Wind Onshore[MWh]', 'Steinkohle[MWh]', 'Erdgas[MWh]',
'Gesamt[MWh]', 'Value', 'Base', 'Peak']
"""
endDate = requestedTimeStamp.strftime('%Y-%m-%d')
startDate = requestedTimeStamp - datetime.timedelta(days=numberOfDaysInPast)
montelStartDate = startDate.strftime('%Y-%m-%d')
# Get MONTEL API DATA
montelApiDf = getDataFromAPI_HourlyIntervals(startDate=montelStartDate, endDate=endDate)
begin_timestamp = startDate # From last Value of data
end_timestamp = str(montelApiDf.iloc[-1].name)
montelMissingData = montelApiDf.loc[begin_timestamp:end_timestamp]
# GET SMARD DATA
realizedPower = [1004071, 1004067, 1004069, 1004070]
realizedConsumption = [5000410]
#5000410
modules_realized = realizedPower
modules_consumed = realizedConsumption
Days_behind = numberOfDaysInPast + 1
EnergyProd = requestSmardData(modulIDs=modules_realized,
timestamp_from_in_milliseconds=(int(time.time()) * 1000) - (
Days_behind * 24 * 3600) * 1000)
EnergyUsage = requestSmardData(modulIDs=modules_consumed,
timestamp_from_in_milliseconds=(int(time.time()) * 1000) - (
Days_behind * 24 * 3600) * 1000)
# CLEAN UP DATA. REMOVE '-' from unknowns
EnergyUsage['Datum'] = EnergyUsage['Datum'] + '-' + EnergyUsage['Uhrzeit']
EnergyUsage = EnergyUsage.drop(columns=['Uhrzeit'])
EnergyUsage['Datum'] = pd.to_datetime(EnergyUsage['Datum'], format='%d.%m.%Y-%H:%M')
EnergyUsage = EnergyUsage.rename(columns={'Datum': 'Date', 'Gesamt (Netzlast)[MWh]': 'Gesamt[MWh]'})
EnergyUsage['Gesamt[MWh]'] = (EnergyUsage['Gesamt[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Datum'] = EnergyProd['Datum'] + '-' + EnergyProd['Uhrzeit']
EnergyProd = EnergyProd.drop(columns=['Uhrzeit'])
EnergyProd['Datum'] = pd.to_datetime(EnergyProd['Datum'], format='%d.%m.%Y-%H:%M')
EnergyProd = EnergyProd.rename(columns={'Datum': 'Date'})
EnergyProd['Wind Onshore[MWh]'] = (EnergyProd['Wind Onshore[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Steinkohle[MWh]'] = (EnergyProd['Steinkohle[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Erdgas[MWh]'] = (EnergyProd['Erdgas[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyProd['Pumpspeicher[MWh]'] = (EnergyProd['Pumpspeicher[MWh]'].replace('-', np.nan)).astype(np.float64)
EnergyUsage = EnergyUsage.resample('H', on='Date').mean()
EnergyProd = EnergyProd.resample('H', on='Date').mean()
# Remove Duplicates
EnergyProd = EnergyProd.loc[~EnergyProd.index.duplicated(keep='first')]
EnergyUsage = EnergyUsage.loc[~EnergyUsage.index.duplicated(keep='first')]
montelMissingData = montelMissingData.loc[~montelMissingData.index.duplicated(keep='first')]
MissingDataset = pd.concat([EnergyProd,EnergyUsage, montelMissingData], axis=1)
MissingDataset = MissingDataset.dropna()
return MissingDataset
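# Usage sketch (added note; the timestamp is an arbitrary example value):
# df = get_data_for_prediction(pd.Timestamp('2021-06-01 12:00'), numberOfDaysInPast=60)
# would return an hourly dataframe combining SMARD production/consumption with Montel
# spot prices for the 60 days leading up to the requested timestamp.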
# ******************************************************************************************************************
# ********************************************** SMARD DATA REQUEST ************************************************
# ******************************************************************************************************************
def requestSmardData(
modulIDs=[8004169],
timestamp_from_in_milliseconds=(int(time.time()) * 1000) - (3 * 3600) * 1000,
timestamp_to_in_milliseconds=(int(time.time()) * 1000),
region="DE",
language="de",
type="discrete"
):
'''
Requests and returns a dataframe of SMARD.de data
:param modulIDs: ID of desired modules
:param timestamp_from_in_milliseconds: Time from current
:param timestamp_to_in_milliseconds: Desired timepoint
:param region: Region of data
:param language: Language of data
:param type: Type of data
:return: Dataframe
'''
# http request content
url = "https://www.smard.de/nip-download-manager/nip/download/market-data"
body = json.dumps({
"request_form": [
{
"format": "CSV",
"moduleIds": modulIDs,
"region": region,
"timestamp_from": timestamp_from_in_milliseconds,
"timestamp_to": timestamp_to_in_milliseconds,
"type": type,
"language": language
}]})
# http response
data = requests.post(url, body)
# create pandas dataframe out of response string (csv)
df = pd.read_csv(StringIO(data.text), sep=';')
return df
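# Usage sketch (added note; module IDs taken from get_data_for_prediction above):
# requestSmardData(modulIDs=[1004071, 1004067, 1004069, 1004070],
#                  timestamp_from_in_milliseconds=(int(time.time()) - 7 * 24 * 3600) * 1000)
# would fetch roughly the last week of realized generation as a raw semicolon-separated dataframe.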
# ******************************************************************************************************************
# ********************************************** MONTEL API REQUEST ************************************************
# ******************************************************************************************************************
def getDataFromAPI_HourlyIntervals(startDate, endDate):
"""
Input Data should be in the following form:
year-month-day
:param startDate: '2015-01-01'
:param endDate: '2019-01-01'
:return: Montel Api Dataframe in 15min intervals
"""
def repeatlist(list_before, i):
list_after = [val for val in list_before for k in range(i)]
return list_after
# Get Bearer Token
page = requests.get('https://coop.eikon.tum.de/mbt/mbt.json')
dictsoup = (ast.literal_eval(page.text))
token = str((dictsoup['access_token']))
# token = "<KEY>"
url = 'http://api.montelnews.com/spot/getprices'
headers = {"Authorization": 'Bearer ' + token}
params = {
'spotKey': '14',
'fields': ['Base', 'Peak', 'Hours'],
'fromDate': str(startDate),
'toDate': str(endDate),
'currency': 'eur',
'sortType': 'Ascending'
}
response = requests.get(url, headers=headers, params=params)
data = response.json()
value = []
Timespan = []
date = []
base = []
peak = []
for parts in data['Elements']: # if we create extrenal, can hold data in data1
date.append(parts['Date'])
base.append(parts['Base'])
peak.append(parts['Peak'])
for df in parts['TimeSpans']:
value.append(df['Value'])
Timespan.append(df['TimeSpan'])
date = repeatlist(date, 24)
base = repeatlist(base, 24)
peak = repeatlist(peak, 24)
MontelData = pd.DataFrame(list(zip(date, Timespan, value, base, peak)),
columns=['Date', 'Timespan', 'Value', 'Base', 'Peak'])
MontelData[['time', 'end']] = MontelData['Timespan'].str.split('-', 1, expand=True)
MontelData = MontelData.drop(columns=['Timespan', 'end'])
MontelData['Date'] = MontelData['Date'].str.replace('T00:00:00.0000000', '')
MontelData['Date'] = MontelData['Date'] + '-' + MontelData['time']
#MontelData['Date'] = MontelData[~MontelData['Date'].str.contains('dst')]
MontelData = MontelData.drop(columns=['time'])
MontelData['Date'] = pd.to_datetime(MontelData['Date'], format='%Y-%m-%d-%H:00')
MontelData15 = MontelData.set_index('Date')
MontelData15 = MontelData15.resample('H').mean()
MontelData15 = MontelData15.interpolate(method='time') # FINAL DATA
MontelData15 = MontelData15.dropna()
return MontelData15.loc[~MontelData15.index.duplicated(keep='first')]
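# Usage sketch (added note; dates are arbitrary example values):
# getDataFromAPI_HourlyIntervals('2021-01-01', '2021-02-01') returns an hourly-indexed
# dataframe with 'Value', 'Base' and 'Peak' columns for that window.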
# Three Datasets
# Electricity Price data from Montel
# Electricity production and consumption from Smard
# ******************************************************************************************************************
# ********************************************** MULTI VARIATE LSTM ************************************************
# ******************************************************************************************************************
# Returns a dataframe of the following predicted variables, in order, by hour:
# Erdgas[MWh], Gesamt[MWh], Steinkohle[MWh], Wind Onshore[MWh], Value
# All variables used come from either the Montel API or the energy production data
def predict_price_LSTM(targetDatetime,
pathToCheckpoint,
historicalDays=60,
makePredicition=True,
loadFromCheckpoint=1,
trainingEnabled=0,
gpuEnabled=0,
batch_size=16,
loss_Function=RMSE(),
epochsNumber=90,
numberLayers=2,
hiddenSize=512,
numWorkers=8
):
"""
:param targetDatetime: Date and time of requested day to predict. Should be given as a pandas datetime object
:param pathToCheckpoint: Computer Path to the LSTM model Checkpoint
:param historicalDays: Number of days of history prior to the requested day to use for prediction. Minimum number = 14. Default = 60
:param makePredicition: Set Equal to True if you want a prediction at the output. Default = True
:param loadFromCheckpoint: If activated, Checkpoint will be loaded into model. Default = 1
:param trainingEnabled: If activated, training will be enabled. Default = 0
:param gpuEnabled: If a GPU is available, the model will be trained on the GPU at the given device position. Default = 0
:param batch_size: For training. Default = 16
:param loss_Function: Loss function for training. Default = RMSE
:param epochsNumber: Number of epochs for training. Default = 90
:param numberLayers: Number of layers in model to be created. Default = 2
:param hiddenSize: Number of hidden states in lstm. Default = 512
:param numWorkers: number of workers specified for dataloader. Default = 8
:return: Returns a dataframe of predicted values 1 hour intervals.
Also return individual steps of 1 hour, 1 day and 1 week ahead predictions
"""
# ProcessFlags
hourlyData = 1
if loadFromCheckpoint == 1:
chk_path = pathToCheckpoint
if hourlyData == 1:
max_prediction_length = 168 # forecast 1 week
max_encoder_length = 168 * 2 # use 2 weeks of history
data = get_data_for_prediction(targetDatetime, historicalDays)
data['GroupID'] = 'A'
data['time_idx'] = array(range(data.shape[0]))
data.reset_index(level=0, inplace=True)
Array1= np.array(data['Wind Onshore[MWh]'])
Array2= np.array(data['Steinkohle[MWh]'])
Array3= np.array(data['Erdgas[MWh]'])
pos=0
for i in Array1:
if int(i)<10:
Array1[pos] = i * 1000
pos =pos+1
pos=0
for i in Array2:
if int(i) < 10:
Array2[pos] = i * 1000
pos = pos + 1
pos = 0
for i in Array3:
if int(i) < 10:
Array3[pos] = i * 1000
pos = pos + 1
data.drop(columns={'Wind Onshore[MWh]','Steinkohle[MWh]','Erdgas[MWh]'})
data['Wind Onshore[MWh]']= Array1
data['Steinkohle[MWh]']= Array2
data['Erdgas[MWh]']= Array3
#print(data)
# data['Date'] = pd.to_datetime(data['Date'], format='%d/%m/%Y %H:00')
# **************************************************************************************************************
# ********************************************* PREPROCESSING **************************************************
# **************************************************************************************************************
# fill in any missing values historical data may have
training_cutoff = data["Date"].max() - timedelta(days=7)
groupind = data['GroupID']
groupind2 = data['time_idx']
groupind3 = data['Date']
data = data.drop(columns=['GroupID', 'time_idx', 'Date'])
data = data.apply(lambda x: x.fillna(x.mean()))
data = pd.concat([data, groupind], axis=1)
data = pd.concat([data, groupind2], axis=1)
import copy
import json
import logging
import os
import colorcet as cc
import pandas as pd
import pyproj
import pytoml
import tornado
import tornado.escape
import yaml
from bokeh.layouts import row, widgetbox, layout
from bokeh.models import Select, CustomJS, Jitter, DataTable, TableColumn, Slider, Button
# noinspection PyUnresolvedReferences
from bokeh.palettes import linear_palette
from bokeh.plotting import figure, ColumnDataSource
from bokeh.themes import Theme
from bokeh.tile_providers import STAMEN_TERRAIN
log = logging.getLogger(__name__)
def modify_doc(doc):
SIZES = list(range(6, 22, 3))
# define available palettes
palettes = {k: v for k, v in cc.palette.items() if
("_" not in k and
k not in ["bkr", "coolwarm", "bjy", "bky", "gwv"])}
#################
# data handling #
#################
def get_data(path, force_discrete_colorable):
"""Read data from csv and transform map coordinates."""
data = pd.read_csv(path)
# data from columns in force_discrete_colorable will be treated as discrete even if numeric
for col in data.columns:
if col in force_discrete_colorable:
data[col] = data[col].apply(str)
data = data.applymap(lambda x: "NaN" if pd.isnull(x) else x)
# transform coords to map projection
wgs84 = pyproj.Proj(init="epsg:4326")
web_mer = pyproj.Proj(init="epsg:3857")
data["easting"] = "NaN"
data["northing"] = "NaN"
data["easting"] = data["easting"].astype("float64")
data["northing"] = data["northing"].astype("float64")
data.loc[pd.notnull(data["lon"]), "easting"], data.loc[pd.notnull(data["lat"]), "northing"] = zip(
*data.loc[pd.notnull(data["lon"]) & pd.notnull(data["lat"])].apply(
lambda x: pyproj.transform(wgs84, web_mer, x["lon"], x["lat"]), axis=1))
# show unknown locations on map in antarctic
default_wgs84 = config.get('default_coords') or {'lon': 0, 'lat': -80}
default_web_mer = dict(zip(("lon", "lat"),
pyproj.transform(wgs84, web_mer, default_wgs84["lon"], default_wgs84["lat"])))
data.easting = data.easting.apply(lambda x: default_web_mer["lon"] if pd.isnull(x) else x)
data.northing = data.northing.apply(lambda x: default_web_mer["lat"] if pd.isnull(x) else x)
return data
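# Usage sketch (added note; path and column name are hypothetical - any CSV with
# 'lon'/'lat' columns works): get_data('samples.csv', force_discrete_colorable=['year'])
# adds web-mercator 'easting'/'northing' columns and parks rows without coordinates
# at the configured default location.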
def update_df(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable):
"""update the size and color columns of the given df based on widget selections and column classifications"""
_df["size"] = 9
if _size != 'None' and _size in _discrete_sizeable:
values = _df[_size][pd.notnull(_df[_size])].unique()
if all([val.isnumeric() for val in values]):
values = sorted(values, key=lambda x: float(x))
codes = dict(zip(values, range(len(values))))
groups = [codes[val] for val in _df[_size].values]
_df["size"] = [SIZES[xx] for xx in groups]
elif _size != 'None' and _size in _continuous:
try:
groups = pd.qcut(_df[_size].values, len(SIZES))
except ValueError:
groups = pd.cut(_df[_size].values, len(SIZES))
_df["size"] = [SIZES[xx] for xx in groups.codes]
_df["color"] = "#31AADE"
if _color != 'None' and _color in _discrete_colorable:
values = _df[_color][pd.notnull(_df[_color])].unique()
colors = linear_palette(palettes[_palette], len(values))
if all([val.isnumeric() for val in values]):
values = sorted(values, key=lambda x: float(x))
codes = dict(zip(values, range(len(values))))
groups = [codes[val] for val in _df[_color].values]
_df["color"] = [colors[xx] for xx in groups]
elif _color != 'None' and _color in _continuous:
colors = palettes[_palette]
groups = pd.cut(_df[_color].values, len(colors))
_df["color"] = [colors[xx] for xx in groups.codes]
def create_source(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable):
"""Update df and return new ColumnDataSource."""
update_df(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable)
_df["ns"] = _df["northing"]
_df["es"] = _df["easting"]
# create a ColumnDataSource from the data set
return ColumnDataSource(_df)
def update_source(_source, _df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable):
"""update df and and propagate changes to source"""
update_df(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable)
# create a ColumnDataSource from the data set
_source.data.update({"size": _df["size"], "color": _df["color"]})
#######################
# Data Visualizations #
#######################
def create_crossfilter(_df, _source, _discrete, _x, _y):
"""Return a crossfilter plot linked to ColumnDataSource '_source'."""
kw = dict()
if _x in _discrete:
values = _df[_x][pd.notnull(_df[_x])]
import pytest
from dppd import dppd
import pandas as pd
import numpy as np
import pandas.testing
from plotnine.data import mtcars, diamonds
from collections import OrderedDict
assert_series_equal = pandas.testing.assert_series_equal
def assert_frame_equal(left, right, check_column_order=True, **kwargs):
if not check_column_order:
assert set(left.columns) == set(right.columns)
left = left.loc[:, right.columns]
return pandas.testing.assert_frame_equal(left, right, **kwargs)
dp, X = dppd()
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
def ordered_DataFrame(d, index=None):
"""Prior to pandas 0.23 (and python 3.6) the order
of columns in a DataFrame only followed the definition order for OrderedDicts.
"""
od = OrderedDict(d)
return pd.DataFrame(od, index=index)
def test_head():
df = pd.DataFrame({"a": list(range(10))})
actual = dp(df).head(5).pd
should = df.head(5)
assert_frame_equal(should, actual)
def test_ends():
df = pd.DataFrame({"a": list(range(10))})
actual = dp(df).ends(2).pd
should = df.head(2)
should = pd.concat([should, df.tail(2)])
assert_frame_equal(should, actual)
def test_2_stage_concat():
df = pd.DataFrame({"a": list(range(10))})
a = dp(df).head(5).pd
actual = dp(df).concat(a).pd
should = pd.concat([df, a], axis=0)
assert_frame_equal(should, actual)
def test_list_concat():
df = pd.DataFrame({"a": list(range(10))})
should = pd.concat([df, df, df], axis=0)
actual = dp(df).concat([df, df]).pd
assert_frame_equal(should, actual)
def test_arrange():
df = pd.DataFrame(
{
"a": [str(x) for x in (range(10))],
"bb": list(range(10)),
"ccc": list(range(10)),
}
).set_index("a")
should = df.sort_values("bb", ascending=False)
actual = dp(df).arrange("bb").pd
assert_frame_equal(should, actual)
def test_arrange_column_spec():
df = pd.DataFrame(
{
"a": [str(x) for x in (range(10))],
"bb": list(range(10)),
"ccc": list(range(10)),
}
).set_index("a")
should = df.sort_values("bb", ascending=False)
actual = dp(df).arrange([x for x in X.columns if len(x) == 2]).pd
assert_frame_equal(should, actual)
def test_arrange_column_spec_empty():
with pytest.raises(ValueError):
dp(mtcars).arrange(X.columns.str.startswith("nosuchcolumn"))
def test_arrange_grouped_column_spec_empty():
with pytest.raises(ValueError):
dp(mtcars).groupby("cyl").arrange(lambda x: "nosuchcolumn" in x)
def test_arrange_column_spec_inverse():
actual = dp(mtcars).select("hp").arrange("-hp").pd
should = mtcars.sort_values("hp", ascending=True)[["hp"]]
assert_frame_equal(should, actual)
def test_arrange_kind_allowed():
with pytest.raises(ValueError):
dp(mtcars).select(["hp", "qsec"]).arrange("hp", "qsec")
def test_arrange_column_spec_inverse2():
actual = dp(mtcars).select(["hp", "qsec"]).arrange(["hp", "qsec"]).pd
should = mtcars.sort_values(["hp", "qsec"], ascending=[False, False])[
["hp", "qsec"]
]
assert_frame_equal(should, actual)
actual = dp(mtcars).select(["hp", "qsec"]).arrange(["-hp", "qsec"]).pd
should = mtcars.sort_values(["hp", "qsec"], ascending=[True, False])[["hp", "qsec"]]
assert_frame_equal(should, actual)
actual = dp(mtcars).select(["hp", "qsec"]).arrange(["hp", "-qsec"]).pd
should = mtcars.sort_values(["hp", "qsec"], ascending=[False, True])[["hp", "qsec"]]
assert_frame_equal(should, actual)
def test_mutate():
df = pd.DataFrame(
{"a": [str(x) for x in (range(10))], "bb": 10, "ccc": list(range(20, 30))}
).set_index("a")
should = df.assign(d=list(range(30, 40)))
actual = dp(df).mutate(d=X["ccc"] + X["bb"]).pd
assert_frame_equal(should, actual)
def test_transmutate():
df = pd.DataFrame(
{"a": [str(x) for x in (range(10))], "bb": 10, "ccc": list(range(20, 30))}
).set_index("a")
should = pd.DataFrame({"d": list(range(30, 40))}).set_index(df.index)
actual = dp(df).transmutate(d=X["ccc"] + X["bb"]).pd
assert_frame_equal(should, actual)
def test_distinct_dataFrame():
df = pd.DataFrame({"a": list(range(5)) + list(range(5)), "b": list(range(10))})
should = df.head(5)
actual = dp(df).distinct("a").pd
assert_frame_equal(should, actual)
def test_distinct_dataFrame_all_columns():
df = pd.DataFrame({"a": list(range(5)) + list(range(5)), "b": list(range(10))})
should = df
actual = dp(df).distinct().pd
assert_frame_equal(should, actual)
df = pd.DataFrame({"a": list(range(5)) + list(range(5))})
should = df.head(5)
actual = dp(df).distinct().pd
assert_frame_equal(should, actual)
def test_distinct_series():
a = pd.Series(["a", "a", "b", "c", "d", "b"])
should = a.iloc[[0, 2, 3, 4]]
actual = dp(a).distinct().pd
assert_series_equal(should, actual)
def test_filter():
actual = dp(mtcars).filter_by(X.name.str.contains("Merc")).pd
should = mtcars[mtcars.name.str.contains("Merc")]
assert_frame_equal(should, actual)
def test_filter_combo():
actual = dp(mtcars).filter_by(X.name.str.contains("Merc") & (X.hp > 62)).pd
should = mtcars[mtcars.name.str.contains("Merc") & (mtcars.hp > 62)]
assert_frame_equal(should, actual)
def test_add_count():
df = pd.DataFrame({"x": [1, 5, 2, 2, 4, 0, 4], "y": [1, 2, 3, 4, 5, 6, 5]})
actual = dp(df).add_count().pd
should = pd.DataFrame(
OrderedDict(
[
("x", [1, 5, 2, 2, 4, 0, 4]),
("y", [1, 2, 3, 4, 5, 6, 5]),
("count", len(df)),
]
)
)
# should.index = [5, 0, 2, 3, 4, 6, 1]
assert_frame_equal(should, actual)
def test_groupby_add_count():
df = pd.DataFrame({"x": [1, 5, 2, 2, 4, 0, 4], "y": [1, 2, 3, 4, 5, 6, 5]})
actual = dp(df).groupby("x").add_count().ungroup().pd
should = ordered_DataFrame(
{
"x": [1, 5, 2, 2, 4, 0, 4],
"y": [1, 2, 3, 4, 5, 6, 5],
"count": [1, 1, 2, 2, 2, 1, 2],
}
)
# should.index = [5, 0, 2, 3, 4, 6, 1]
assert_frame_equal(should, actual)
def test_groupby_head():
actual = dp(mtcars).groupby("cyl").head(1).select("name").pd
should = (
pd.DataFrame(
{
"name": ["Datsun 710", "Mazda RX4", "Hornet Sportabout"],
"cyl": [4, 6, 8],
"idx": [2, 0, 4],
}
)
.set_index("idx")
.sort_index()[["name"]]
)
should.index.name = None
assert_frame_equal(should, actual)
def test_groupby_ungroup_head():
actual = dp(mtcars).groupby("cyl").identity().ungroup().head(1).pd
should = mtcars.iloc[[0]]
should = should[["cyl"] + [x for x in should.columns if x != "cyl"]]
assert_frame_equal(should, actual)
def test_ungroup_on_non_grouped_raises():
with pytest.raises(AttributeError):
dp(mtcars).ungroup()
def test_groupby_summarise():
actual = dp(mtcars).groupby("cyl").summarise(("name", len, "count")).pd
should = (
pd.DataFrame({"cyl": [4, 6, 8], "count": [11, 7, 14]})
.set_index("cyl")
.reset_index()
)
assert_frame_equal(should, actual)
def test_sorting_within_groups():
actual = dp(mtcars).groupby(X.cyl).arrange("qsec").ungroup().pd
should = mtcars.sort_values(["cyl", "qsec"])
should = should[actual.columns]
assert_frame_equal(should, actual)
def test_sorting_within_groups_head():
actual = dp(mtcars).groupby(X.cyl).print().sort_values("qsec").tail(1).pd
dfs = []
for cyl, sub_df in mtcars.groupby("cyl"):
sub_df = sub_df.sort_values("qsec")
dfs.append(sub_df.tail(1))
should = pd.concat(dfs)[actual.columns]
assert_frame_equal(should, actual)
def test_sorting_within_groups_head_ungroup():
actual = dp(mtcars).groupby(X.cyl).arrange("qsec").ungroup().tail(1).pd
for cyl, sub_df in mtcars.groupby("cyl"):
sub_df = sub_df.sort_values("qsec")
should = sub_df.tail(1)[actual.columns]
assert_frame_equal(should, actual)
def test_select_in_grouping_keeps_groups():
actual = dp(mtcars).groupby("cyl").select("qsec").ungroup().pd
assert (actual.columns == ["cyl", "qsec"]).all()
def test_iter_groups():
g = []
ls = []
for grp, sub_df in dp(mtcars).groupby("cyl").itergroups():
g.append(grp)
ls.append(len(sub_df))
assert g == [4, 6, 8]
assert ls == [11, 7, 14]
def test_grouped_mutate_returns_scalar_per_group():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame(
{"cyl": mtcars.cyl, "count": [should[cyl] for cyl in mtcars.cyl]},
index=mtcars.index,
)
assert_frame_equal(should, actual)
def test_grouped_mutate_returns_scalar_per_group_str():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: "X" + str(len(sub_df)) for (grp, sub_df) in X.itergroups()})
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame(
{"cyl": mtcars.cyl, "count": ["X" + str(should[cyl]) for cyl in mtcars.cyl]},
index=mtcars.index,
)
assert_frame_equal(should, actual)
def test_grouped_mutate_returns_series_per_group():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.select("grp_rank")
.ungroup()
.pd.sort_index()
)
ac = []
for grp, sub_df in mtcars.groupby("cyl"):
x = sub_df["hp"].rank()
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(grp_rank=ac)[["cyl", "grp_rank"]]
assert_frame_equal(should, actual)
def test_grouped_mutate_callable():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(max_hp=lambda x: x["hp"].max())
.select(["cyl", "max_hp", "name"])
.ungroup()
.pd
)
ac = []
for grp, sub_df in mtcars.groupby("cyl"):
x = pd.Series(sub_df["hp"].max(), index=sub_df.index)
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(max_hp=ac)[["cyl", "max_hp", "name"]].sort_values("name")
assert_frame_equal(should, actual.sort_values("name"))
def test_grouped_mutate_callable2():
actual = (
dp(mtcars)
.groupby(["cyl", "qsec"])
.mutate(max_hp=lambda x: x["hp"].max())
.select(["cyl", "max_hp", "name"])
.ungroup()
.pd
)
ac = []
for grp, sub_df in mtcars.groupby(["cyl", "qsec"]):
x = pd.Series(sub_df["hp"].max(), index=sub_df.index)
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(max_hp=ac)[["cyl", "qsec", "max_hp", "name"]].sort_values(
"name"
)
assert_frame_equal(should, actual.sort_values("name"))
def test_grouped_mutate_returns_scalar():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count=4)
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame({"cyl": mtcars.cyl, "count": 4}, index=mtcars.index)
assert_frame_equal(should, actual)
def test_grouped_mutate_returns_series():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count=pd.Series(range(len(mtcars))))
.select("count")
.ungroup()
.pd.sort_index()
)
should = mtcars.groupby("cyl").agg("count")["name"]
should = ordered_DataFrame(
{"cyl": mtcars.cyl, "count": pd.Series(range(len(mtcars)))}, index=mtcars.index
)
assert_frame_equal(should, actual)
def test_grouped_mutate_in_non_group():
actual = (
dp(mtcars)
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.select("count")
.pd.sort_index()
)
should = ordered_DataFrame(
{"count": [len(mtcars)] * len(mtcars)}, index=mtcars.index
)
assert_frame_equal(should, actual)
def test_grouped_mutate_in_non_group_invalid_key():
with pytest.raises(KeyError):
dp(mtcars).mutate(
count={"shu": len(sub_df) for (grp, sub_df) in X.itergroups()}
)
def test_grouped_mutate_in_non_group_multile_keys():
with pytest.raises(KeyError):
dp(mtcars).mutate(count={None: 5, "shu": "hello"})
def test_grouped_mutate_repeated_keys():
df = mtcars.copy()
df.index = list(range(16)) + list(range(16))
with pytest.raises(ValueError): # cannot reindex from duplicate axis
with dppd(df) as (ddf, X):
ddf.groupby("cyl").mutate(
grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()}
)
def test_grouped_mutate_non_sorted():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.select("grp_rank")
.ungroup()
.pd.sort_index()
)
ac = []
for grp, sub_df in mtcars.groupby("cyl"):
x = sub_df["hp"].rank()
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(grp_rank=ac)[["cyl", "grp_rank"]]
assert_frame_equal(should, actual)
def test_groupby_two_summarize_grouped():
actual = (
dp(diamonds).groupby(["color", "cut"]).summarise(("price", len, "count")).pd
)
should = pd.DataFrame(diamonds.groupby(["color", "cut"])["price"].agg("count"))
should.columns = ["count"]
should = should.reset_index()
assert_frame_equal(should, actual)
def test_groupby_two_mutate_grouped():
actual = (
dp(mtcars)
.groupby(["cyl", "vs"])
.mutate(grp_rank={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.select("grp_rank")
.ungroup()
.pd.sort_index()
)
ac = []
for grp, sub_df in mtcars.groupby(["cyl", "vs"]):
x = sub_df["hp"].rank()
ac.append(x)
ac = pd.concat(ac)
should = mtcars.assign(grp_rank=ac)[["cyl", "vs", "grp_rank"]]
assert_frame_equal(should, actual)
def test_select_does_not_remove_group_columns():
actual = dp(mtcars).groupby("cyl").select("name").ungroup().pd
assert (actual.columns == ["cyl", "name"]).all()
def test_unselected_group_columns_is_ignored():
actual = dp(mtcars).groupby("cyl").unselect("cyl").ungroup().pd
assert "cyl" in actual.columns
def test_dropping_non_group_columns_works():
actual = dp(mtcars).groupby("cyl").drop("name", axis=1).ungroup().pd
assert "name" not in actual.columns
def test_dropping_group_columns_is_ignored():
actual = dp(mtcars).groupby("cyl").drop("cyl", axis=1).ungroup().pd
assert "cyl" in actual.columns
def test_groupby_sort_changes_order_but_not_result():
a = (
dp(mtcars)
.groupby("cyl")
.sort_values("hp")
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
b = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: len(sub_df) for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
assert_frame_equal(a, b.loc[a.index]) #
def test_groupby_sort_changes_order_but_not_result2():
a = (
dp(mtcars)
.groupby("cyl")
.sort_values("hp")
.mutate(count={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
b = (
dp(mtcars)
.groupby("cyl")
.mutate(count={grp: sub_df.hp.rank() for (grp, sub_df) in X.itergroups()})
.ungroup()
.pd
)
assert_frame_equal(a, b.loc[a.index]) #
def test_grouped_mutate_missing_keys():
actual = (
dp(mtcars).groupby("cyl").mutate(count={4: 170, 6: 180, 8: 190}).ungroup().pd
)
assert (actual[actual.cyl == 4]["count"] == 170).all()
assert (actual[actual.cyl == 6]["count"] == 180).all()
with pytest.raises(KeyError):
dp(mtcars).groupby("cyl").mutate(count={4: 170, 6: 180}).pd
def test_grouped_2_mutate_missing_keys():
counts = {(4, 0): 40, (4, 1): 41, (6, 0): 60, (6, 1): 61, (8, 0): 80, (8, 1): 81}
actual = dp(mtcars).groupby(["cyl", "vs"]).mutate(count=counts).ungroup().pd
print(actual)
assert (actual[(actual.cyl == 4) & (actual.vs == 0)]["count"] == 40).all()
assert (actual[(actual.cyl == 4) & (actual.vs == 1)]["count"] == 41).all()
assert (actual[(actual.cyl == 6) & (actual.vs == 0)]["count"] == 60).all()
assert (actual[(actual.cyl == 6) & (actual.vs == 1)]["count"] == 61).all()
assert (actual[(actual.cyl == 8) & (actual.vs == 0)]["count"] == 80).all()
assert (actual[(actual.cyl == 8) & (actual.vs == 1)]["count"] == 81).all()
with pytest.raises(KeyError):
del counts[4, 0]
dp(mtcars).groupby(["cyl", "vs"]).mutate(count=counts).pd
def test_basic_summary():
actual = dp(mtcars).groupby("cyl").summarize((X.hp, len, "count")).pd
should = mtcars.groupby("cyl")[["hp"]].agg("count")
should.columns = ["count"]
should = should.reset_index()
assert_frame_equal(should, actual) # will fail
def test_summary_quantiles():
args = [
("disp", lambda x, q=q: x.quantile(q), "q%.2f" % q)
for q in np.arange(0, 1.1, 0.1)
]
actual = dp(mtcars).sort_values("cyl").groupby("cyl").summarise(*args).pd
lambdas = [lambda x, q=q: x.quantile(q) for q in np.arange(0, 1.1, 0.1)]
for l, q in zip(lambdas, np.arange(0, 1.1, 0.1)):
l.__name__ = "q%.2f" % q
should = (
mtcars.sort_values("cyl")
.groupby("cyl")["disp"]
.aggregate(lambdas)
.reset_index()
)
assert_frame_equal(should, actual)
def test_summary_repeated_target_names():
with pytest.raises(ValueError):
dp(mtcars).summarise((X.disp, np.mean, "one"), (X.hp, np.mean, "one")).pd
def test_empty_summarize_raises():
with pytest.raises(ValueError):
dp(mtcars).groupby("cyl").summarize()
with pytest.raises(ValueError):
dp(mtcars).summarize()
def test_summarise_non_tuple():
with pytest.raises(ValueError):
dp(mtcars).groupby("cyl").summarize(np.min)
def test_summarize_auto_name():
actual = dp(mtcars).groupby("cyl").summarize(("hp", np.min))
assert "hp_amin" in actual.columns
def test_do():
def count_and_count_unique(df):
return pd.DataFrame({"count": [len(df)], "unique": [(~df.duplicated()).sum()]})
actual = dp(mtcars).groupby("cyl").select("hp").do(count_and_count_unique).pd
should = pd.DataFrame(
OrderedDict(
[("cyl", [4, 6, 8]), ("count", [11, 7, 14]), ("unique", [10, 4, 9])]
)
)
assert_frame_equal(should, actual)
def test_do_categorical_grouping():
def count_and_count_unique(df):
return pd.DataFrame({"count": [len(df)], "unique": [(~df.duplicated()).sum()]})
actual = (
dp(mtcars)
.mutate(cyl=pd.Categorical(X.cyl))
.groupby("cyl")
.select("hp")
.do(count_and_count_unique)
.pd
)
should = pd.DataFrame(
OrderedDict(
[
("cyl", pd.Categorical([4, 6, 8])),
("count", [11, 7, 14]),
("unique", [10, 4, 9]),
]
)
)
assert_frame_equal(should, actual)
def test_do_without_group():
def count_and_count_unique(df):
return pd.DataFrame({"count": [len(df)], "unique": [(~df.duplicated()).sum()]})
actual = dp(mtcars).select("hp").do(count_and_count_unique).pd
should = pd.DataFrame({"count": [32], "unique": [22]})
assert_frame_equal(should, actual)
def test_do_group2():
def count_and_count_unique(df):
return pd.DataFrame({"count": [len(df)], "unique": [(~df.duplicated()).sum()]})
actual = (
dp(mtcars).groupby(["cyl", "am"]).select("hp").do(count_and_count_unique).pd
)
should = pd.DataFrame(
OrderedDict(
[
("cyl", {0: 4, 1: 4, 2: 6, 3: 6, 4: 8, 5: 8}),
("am", {0: 0, 1: 1, 2: 0, 3: 1, 4: 0, 5: 1}),
("count", {0: 3, 1: 8, 2: 4, 3: 3, 4: 12, 5: 2}),
("unique", {0: 3, 1: 7, 2: 3, 3: 2, 4: 7, 5: 2}),
]
)
)
assert_frame_equal(should, actual)
def test_filter_by_callable():
actual = dp(mtcars).filter_by(lambda x: x.hp > 100).pd
should = mtcars[mtcars.hp > 100]
assert_frame_equal(actual, should)
def test_filter_by_converted_column():
actual = dp(mtcars).filter_by("am").pd
should = mtcars[mtcars.am.astype(bool)]
assert_frame_equal(actual, should)
def test_filter_by_bool_column():
actual = (
dp(mtcars).mutate(rx=X.name.str.contains("RX")).filter_by("rx").select("-rx").pd
)
actual2 = (
dp(mtcars).mutate(rx=X.name.str.contains("RX")).filter_by(X.rx).select("-rx").pd
)
should = mtcars[mtcars.name.str.contains("RX")]
assert_frame_equal(should, actual)
assert_frame_equal(should, actual2)
def test_filter_by_column_raises_on_non_column():
with pytest.raises(ValueError):
dp(mtcars).filter_by("rx").pd
def test_filter_by_vector_grouped():
actual = dp(mtcars).groupby("cyl").filter_by(X.hp.rank() <= 2).ungroup().pd
keep = set()
for grp, sub_df in mtcars.groupby("cyl"):
keep.update(sub_df["name"][sub_df["hp"].rank() <= 2])
should = mtcars[mtcars.name.isin(keep)]
assert set(should.columns) == set(actual.columns)
should = should[actual.columns]
assert_frame_equal(actual, should)
def test_filter_by_callable_grouped():
actual = (
dp(mtcars).groupby("cyl").filter_by(lambda x: x.hp.rank() <= 2).ungroup().pd
)
keep = set()
for grp, sub_df in mtcars.groupby("cyl"):
keep.update(sub_df["name"][sub_df["hp"].rank() <= 2])
should = mtcars[mtcars.name.isin(keep)]
assert set(should.columns) == set(actual.columns)
should = should[actual.columns]
assert_frame_equal(actual, should)
def test_grouped_filter_by_returns_series():
actual = (
dp(mtcars)
.groupby("cyl")
.filter_by(
{
grp: sub_df.hp.rank(ascending=False) <= 2
for (grp, sub_df) in X.itergroups()
}
)
.ungroup()
.pd.sort_index()
)
keep = set()
for grp, sub_df in mtcars.groupby("cyl"):
keep.update(sub_df["name"][sub_df["hp"].rank(ascending=False) <= 2])
should = mtcars[mtcars.name.isin(keep)]
assert set(should.columns) == set(actual.columns)
should = should[actual.columns]
assert_frame_equal(should, actual)
def test_filter_by_unkown_raises():
with pytest.raises(ValueError):
dp(mtcars).filter_by(55)
def test_mutate_series():
with pytest.raises(AttributeError):
dp(mtcars).sum().mutate(a="A")
def test_groupby_select():
actual = dp(mtcars)[X.groupby("cyl").hp.rank(ascending=False) < 2].pd
should = ["Lotus Europa", "Ferrari Dino", "Maserati Bora"]
assert set(actual.name) == set(should)
def test_groupby_within_chain():
actual = dp(mtcars).groupby("cyl").mean().select("hp").pd
should = mtcars.groupby("cyl").mean()[["hp"]]
assert_frame_equal(should, actual)
def test_groupby_within_chain_select_on_group():
actual = dp(mtcars).groupby("cyl").select("hp").mean().pd
should = mtcars.groupby("cyl").mean()[["hp"]]
assert_frame_equal(should, actual)
def test_groupby_axis_1_raises_on_verb():
# this is ok
dp(mtcars).groupby(level=0, axis=1).pd
with pytest.raises(ValueError):
dp(mtcars).groupby(level=0, axis=1).select("cyl")
def test_grouped_filter_by_X_apply():
actual = dp(mtcars).groupby("cyl").filter_by(X.apply(len) > 10).ungroup().pd
g = mtcars.groupby("cyl").apply(len) > 10
should = mtcars[mtcars.cyl.isin(g.index[g])]
assert_frame_equal(should, actual, check_column_order=False)
def test_grouped_filter_by_wrong_length_of_series():
with pytest.raises(pd.core.indexing.IndexingError):
dp(mtcars).groupby("cyl").filter_by(pd.Series([True, False], index=[4, 8]))
def test__lenght_of_series():
with pytest.raises(pd.core.indexing.IndexingError):
dp(mtcars).filter_by(pd.Series([True, False], index=[4, 8]))
def test_grouped_mutate_X_apply():
actual = dp(mtcars).groupby("cyl").mutate(count=X.apply(len)).ungroup().pd
should = dp(mtcars).groupby("cyl").add_count().ungroup().pd
assert_frame_equal(should, actual, check_column_order=False, check_dtype=False)
def test_grouped_mutate_X_apply_str():
actual = (
dp(mtcars)
.groupby("cyl")
.mutate(count=X.apply(lambda x: str(len(x))))
.ungroup()
.pd
)
should = (
dp(mtcars)
.groupby("cyl")
.add_count()
.ungroup()
.mutate(count=X["count"].astype(str))
.pd
)
assert_frame_equal(should, actual, check_column_order=False)
def test_grouped_mutate_wrong_length():
with pytest.raises(pd.core.indexing.IndexingError):
dp(mtcars).groupby("cyl").mutate(count=pd.Series([True, False], index=[4, 8]))
def test_mutate_wrong_length():
with pytest.raises(pd.core.indexing.IndexingError):
        dp(mtcars).groupby("cyl").mutate(count=pd.Series([True, False], index=[4, 8]))
import re
import os
import sys
import sqlite3
import praw
import pandas as pd
from nltk.corpus import stopwords
from datetime import datetime, timedelta
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../"))
import scheduled_tasks.reddit.config as cfg
from scheduled_tasks.reddit.get_reddit_trending_stocks.fast_yahoo import download_advanced_stats, download_quick_stats
from custom_extensions.stopwords import stopwords_list
from custom_extensions.custom_words import new_words
analyzer = SentimentIntensityAnalyzer()
analyzer.lexicon.update(new_words)
reddit = praw.Reddit(client_id=cfg.API_REDDIT_CLIENT_ID,
client_secret=cfg.API_REDDIT_CLIENT_SECRET,
user_agent=cfg.API_REDDIT_USER_AGENT)
conn = sqlite3.connect(r"database/database.db", check_same_thread=False)
db = conn.cursor()
pattern = r"(?<=\$)?\b[A-Z]{1,5}\b(?:\.[A-Z]{1,2})?"
current_datetime = datetime.utcnow()
def round_time(time_string, base=5):
"""
Round time to the nearest base so that minute hand on time will look nicer in db
Parameters
----------
time_string: str
        minute hand you wish to round to the nearest multiple of base, e.g. '07' -> '05'
base: int
round to the nearest base
"""
rounded = str(base * round(int(time_string)/base))
if len(rounded) == 1:
rounded = "0" + rounded
return rounded
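# Illustrative sketch (not part of the original script): with the default base of 5,
# round_time rounds the minute hand to the nearest multiple and keeps two digits, e.g.
#   round_time("07") -> "05", round_time("08") -> "10", round_time("02") -> "00"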
def insert_into_word_cloud_dict(text, all_words_dict):
"""
Extract all words from comment and insert into word cloud dict
Parameters
----------
text: str
comment
all_words_dict: dict
previous dict of word cloud
"""
for word in text.upper().split():
word = re.sub(r'\d|\W+', '', word)
all_words_dict[word] = all_words_dict.get(word, 0) + 1
return all_words_dict
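# Illustrative sketch (hypothetical input, not part of the original script):
#   insert_into_word_cloud_dict("GME to the moon", {})
#   -> {'GME': 1, 'TO': 1, 'THE': 1, 'MOON': 1}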
def extract_ticker(text, tickers_dict, sentiment_dict, sentiment_score, calls_dict, calls_mentions, puts_dict,
puts_mentions):
"""
Extract tickers with correct pattern from comment and add sentiment, calls, put to previous dict
Parameters
----------
text: str
comment
tickers_dict: dict
previous dict of word cloud
sentiment_dict: dict
sentiment dict of tickers
sentiment_score: float
sentiment of comment
calls_dict: dict
calls dict of tickers
calls_mentions: bool
whether or not 'call(s)' or '100C' is mentioned
puts_dict: dict
puts dict of tickers
puts_mentions: bool
whether or not 'put(s)' or '100P' is mentioned
"""
extracted_tickers = set(re.findall(pattern, text.upper()))
for ticker in extracted_tickers:
tickers_dict[ticker] = tickers_dict.get(ticker, 0) + 1
sentiment_dict[ticker] = sentiment_dict.get(ticker, 0) + sentiment_score
if calls_mentions:
calls_dict[ticker] = calls_dict.get(ticker, 0) + 1
if puts_mentions:
puts_dict[ticker] = puts_dict.get(ticker, 0) + 1
return tickers_dict, sentiment_dict, calls_dict, puts_dict
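# Illustrative sketch (hypothetical comment text, not part of the original script):
# for a comment like "YOLO $GME calls", the pattern above extracts 'GME' (along with any
# other 1-5 letter upper-case token), increments its count in tickers_dict, adds the
# comment's sentiment score to sentiment_dict['GME'], and, since calls_mentions would be
# True for this text, increments calls_dict['GME'] as well.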
def check_for_options(text):
"""
Check whether or not text contains anything related to options (call, put, 100C, 100P etc)
Parameters
----------
text: str
comment
"""
text = text.upper()
    if re.findall(r"CALL|\d+C", text):
        calls_mentions = True
        print(re.findall(r"CALL|\d+C", text), text)
    else:
        calls_mentions = False
    if re.findall(r"PUT|\d+P", text):
        puts_mentions = True
        print(re.findall(r"PUT|\d+P", text), text)
else:
puts_mentions = False
return calls_mentions, puts_mentions
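# Illustrative sketch (hypothetical inputs, not part of the original script):
#   check_for_options("bought 420C for next week") -> (True, False)
#   check_for_options("puts printing")             -> (False, True)
#   check_for_options("just holding shares")       -> (False, False)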
def wsb_live():
"""
Get real time sentiment from wsb daily discussion thread
"""
current_datetime_str = str(current_datetime).rsplit(":", 1)[0]
minute_hand = round_time(current_datetime_str.split(":")[1])
current_datetime_str = current_datetime_str[:-2] + round_time(minute_hand)
threshold_datetime = datetime.timestamp(current_datetime - timedelta(minutes=10))
basic_stopwords_list = list(map(lambda x: re.sub(r'\W+', '', x.upper()), stopwords.words('english'))) + \
["THATS", "GOT", "IM", "LIKE", "STILL", "EVER", "EVEN", "CANT", "US", "THATS", "GO", "WOULD",
"MUCH", "GET", "ONE", "SEE", "WAY", "NEED", "TAKE", "MAKE", "GETTING", "GOING", "GONNA",
"NEED", "THINK", "SAY", "SAID", "KNOW", "WAY", "TIME", "WEEK", "WELL", "WANT", "THING",
"LETS", "IVE", "COULD", "ALWAYS", "FEEL", "FELT", "FEELS", "WHATS", "REALLY", "LOOK",
"GUYS", "PEOPLE", "ALREADY", "IMGEMOTET_TH"]
subreddit = reddit.subreddit("wallstreetbets")
all_words_dict = dict()
tickers_dict = dict()
sentiment_dict = dict()
calls_dict = dict()
puts_dict = dict()
for post in subreddit.hot(limit=10):
try:
# Ensure that post is stickied and the post is not an image
if post.stickied and ".jpg" not in post.url and ".png" not in post.url:
print(post.url)
submission = reddit.submission(url=post.url)
submission.comment_sort = "new"
submission.comments.replace_more(limit=0)
for comment in submission.comments:
if threshold_datetime < comment.created_utc:
comment_body = comment.body
# Get sentiment of comment
vs = analyzer.polarity_scores(comment_body)
sentiment_score = float(vs['compound'])
# print(datetime.fromtimestamp(comment.created_utc), sentiment_score, comment_body)
# Remove number/special characters (clean up word cloud)
all_words_dict = insert_into_word_cloud_dict(comment_body, all_words_dict)
# Check if calls and puts is mentioned in comment
calls_mentions, puts_mentions = check_for_options(comment_body)
# Get ticker based on pattern
tickers_dict, sentiment_dict, calls_dict, puts_dict = \
extract_ticker(comment_body, tickers_dict, sentiment_dict, sentiment_score, calls_dict,
calls_mentions, puts_dict, puts_mentions)
# Read sub-comment
for second_level_comment in comment.replies:
second_level_comment = second_level_comment.body
# Get sentiment of comment
vs = analyzer.polarity_scores(second_level_comment)
sentiment_score = float(vs['compound'])
# Insert into word cloud
all_words_dict = insert_into_word_cloud_dict(second_level_comment, all_words_dict)
# Check if calls and puts is mentioned in comment
calls_mentions, puts_mentions = check_for_options(second_level_comment)
# Get ticker based on pattern
tickers_dict, sentiment_dict, calls_dict, puts_dict = \
extract_ticker(second_level_comment, tickers_dict, sentiment_dict, sentiment_score,
calls_dict, calls_mentions, puts_dict, puts_mentions)
except:
pass
# Remove ticker if it is found in stopwords_list
tickers_dict = dict(sorted(tickers_dict.items(), key=lambda item: item[1]))
for key in list(tickers_dict.keys()):
if key in stopwords_list:
del tickers_dict[key]
    # Remove word from word cloud if it is found in basic_stopwords_list
all_words_dict = dict(sorted(all_words_dict.items(), key=lambda item: item[1]))
for key in list(all_words_dict.keys()):
if key in basic_stopwords_list:
del all_words_dict[key]
df = pd.DataFrame(all_words_dict, index=[0])
df = df.T
df.reset_index(inplace=True)
df.rename(columns={"index": "word", 0: "mentions"}, inplace=True)
# Criteria to insert into db
df = df[(df["mentions"] >= 3) & (df["word"].str.len() > 1)]
df["date_updated"] = current_datetime_str
df.to_sql("wsb_word_cloud", conn, if_exists="append", index=False)
quick_stats = {'regularMarketPreviousClose': 'prvCls',
'regularMarketVolume': 'volume',
'regularMarketPrice': 'price',
'marketCap': 'mkt_cap'}
quick_stats_df = download_quick_stats(list(tickers_dict.keys()), quick_stats, threads=True)
# Ticker must be active in order to be valid
quick_stats_df["volume"] = pd.to_numeric(quick_stats_df["volume"], errors='coerce')
    quick_stats_df["price"] = pd.to_numeric(quick_stats_df["price"], errors='coerce')
import pandas as pd
import numpy as np
import json
from graphviz import Digraph
import matplotlib.pyplot as plt
class Report:
def __init__(self, scenario, file_id):
self.scenario = scenario
self.file_id = file_id
def get_path_to_hash(self):
scenario = self.scenario
return '/home/jovyan/test_data/'+scenario+'/hd2/status/hash.json'
def get_hash_data(self):
path_to_hash = self.get_path_to_hash()
with open(path_to_hash) as json_file:
data = json.load(json_file)
return data
def get_files(self):
data = self.get_hash_data()
files = [elem['file_name'] for elem in data]
return files
def get_path_to_report(self):
scenario = self.scenario
file_id = self.file_id
data = self.get_hash_data()
folder_hash = data[file_id]['original_hash']
return '/home/jovyan/test_data/'+scenario+'/hd2/data/'+folder_hash+'/report.json'
def get_dict_from_report(self):
path_to_report = self.get_path_to_report()
df0 = pd.read_json(path_to_report)
df0 = df0.reset_index()
return df0.iloc[3]['gw:GWallInfo']
def get_df_from_report(self):
info = self.get_dict_from_report()
return pd.DataFrame.from_dict(info)
def print_document_summary(self):
info = self.get_dict_from_report()
d = info['gw:DocumentSummary']
for key in d:
d[key] = [d[key]]
document_summary = pd.DataFrame.from_dict(d)
document_summary.rename(columns={'gw:TotalSizeInBytes':'Total Size In Bytes',
'gw:FileType':'File Type',
'gw:Version':'Version'}, inplace=True)
print('Total Size In Bytes :', document_summary['Total Size In Bytes'].iloc[0])
print('File Type :', document_summary['File Type'].iloc[0])
print('Version :', document_summary['Version'].iloc[0])
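    # Illustrative usage sketch (the scenario name and file id below are hypothetical):
    #   report = Report('scenario_1', 0)
    #   report.print_document_summary()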
def print_extracted_items(self):
info = self.get_dict_from_report()
d = info['gw:ExtractedItems']
for key in d:
d[key] = [d[key]]
extracted_items = pd.DataFrame.from_dict(d)
extracted_items.rename(columns={'@itemCount':'Item Count'}, inplace=True)
print("Item Count :", extracted_items['Item Count'].iloc[0])
def content_management_policy_df(self):
info = self.get_dict_from_report()
d = info['gw:ContentManagementPolicy']['gw:Camera']
df0 = pd.DataFrame.from_dict(d)
data = info['gw:ContentManagementPolicy']['gw:Camera'][0]['gw:ContentSwitch']
if len(data) == 2:
for key in data:
data[key] = [data[key]]
df = pd.DataFrame.from_dict(data)
df['@cameraName'] = df0.iloc[0]['@cameraName']
df = df[['@cameraName', 'gw:ContentName', 'gw:ContentValue']]
for i in range(1, len(df0)):
data = info['gw:ContentManagementPolicy']['gw:Camera'][i]['gw:ContentSwitch']
if len(data) == 2:
for key in data:
data[key] = [data[key]]
df1 = pd.DataFrame.from_dict(data)
df1['@cameraName'] = df0.iloc[i]['@cameraName']
df1 = df1[['@cameraName', 'gw:ContentName', 'gw:ContentValue']]
df = pd.concat([df, df1], ignore_index=True)
df.rename(columns={'@cameraName':'Camera Name',
'gw:ContentName':'Content Name',
'gw:ContentValue':'Content Value'}, inplace=True)
return df
def camera_graph(self, camera_value):
content_management_policy = self.content_management_policy_df()
gra = Digraph()
# root node
elem = camera_value
gra.node(elem, shape='box')
df0 = content_management_policy[content_management_policy['Camera Name']==elem]
content_name = list(df0['Content Name'].unique())
with gra.subgraph() as i:
i.attr(rank='same')
for elem2 in content_name:
i.node(elem2, shape='box')
for elem2 in content_name:
df00 = df0[df0['Content Name']==elem2]
k = int(df00.index[0])
text = df00.iloc[0]['Content Value']
gra.node(str(k), text, shape='box')
gra.edge(elem2, str(k))
for elem3 in df0['Content Name']:
gra.edge(elem, elem3)
return gra
def get_num_of_groups(self, text=False):
info = self.get_dict_from_report()
num_groups = info['gw:ContentGroups']['@groupCount']
if text:
print("There are " + num_groups + " groups")
else:
return num_groups
def content_groups_df(self):
info = self.get_dict_from_report()
d = info['gw:ContentGroups']['gw:ContentGroup'][0]['gw:ContentItems']['gw:ContentItem']
df = pd.DataFrame.from_dict(d)
df['gw:BriefDescription'] = info['gw:ContentGroups']['gw:ContentGroup'][0]['gw:BriefDescription']
df = df[['gw:BriefDescription', 'gw:TechnicalDescription', 'gw:InstanceCount', 'gw:TotalSizeInBytes', 'gw:AverageSizeInBytes', 'gw:MinSizeInBytes', 'gw:MaxSizeInBytes']]
num_groups = self.get_num_of_groups()
        for i in range(1, int(num_groups)):
            d = info['gw:ContentGroups']['gw:ContentGroup'][i]['gw:ContentItems']['gw:ContentItem']
            df1 = pd.DataFrame.from_dict(d)
df1['gw:BriefDescription'] = info['gw:ContentGroups']['gw:ContentGroup'][i]['gw:BriefDescription']
df1 = df1[['gw:BriefDescription', 'gw:TechnicalDescription', 'gw:InstanceCount', 'gw:TotalSizeInBytes', 'gw:AverageSizeInBytes', 'gw:MinSizeInBytes', 'gw:MaxSizeInBytes']]
df = pd.concat([df, df1], ignore_index=True)
df.rename(columns={'gw:BriefDescription':'Brief Description',
'gw:TechnicalDescription':'Technical Description',
'gw:InstanceCount':'Instance Count',
'gw:TotalSizeInBytes':'Total Size In Bytes',
'gw:AverageSizeInBytes':'Average Size In Bytes',
'gw:MinSizeInBytes':'Min Size In Bytes',
'gw:MaxSizeInBytes':'Max Size In Bytes'}, inplace=True)
return df
def group_df(self, group_value):
content_groups = self.content_groups_df()
df0 = content_groups[content_groups['Brief Description']==group_value]
df1 = df0.set_index('Technical Description')
df1["Instance Count"] = pd.to_numeric(df1["Instance Count"])
df1["Total Size In Bytes"] = pd.to_numeric(df1["Total Size In Bytes"])
df1["Average Size In Bytes"] = pd.to_numeric(df1["Average Size In Bytes"])
        df1["Min Size In Bytes"] = pd.to_numeric(df1["Min Size In Bytes"])
# Copyright 2020 KCL-BMEIS - King's College London
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, IO, Optional, Tuple, Union
import os
import uuid
from datetime import datetime, timezone
import time
import warnings
import numpy as np
import pandas as pd
import h5py
from exetera.core.abstract_types import Field, AbstractSession
from exetera.core import operations
from exetera.core import persistence as per
from exetera.core import fields as fld
from exetera.core import readerwriter as rw
from exetera.core import validation as val
from exetera.core import operations as ops
from exetera.core import dataset as ds
from exetera.core import dataframe as df
class Session(AbstractSession):
"""
Session is the top-level object that is used to create and open ExeTera Datasets. It also
provides operations that can be performed on Fields. For a more detailed explanation of
Session and examples of its usage, please refer to
https://github.com/KCL-BMEIS/ExeTera/wiki/Session-API
:param chunksize: Change the default chunksize that fields created with this dataset use.
Note this is a hint parameter and future versions of Session may choose to ignore it if it
is no longer required. In general, it should only be changed for testing.
:param timestamp: Set the official timestamp for the Session's creation rather than taking
the current date/time.
"""
def __init__(self,
chunksize: int = ops.DEFAULT_CHUNKSIZE,
timestamp: str = str(datetime.now(timezone.utc))):
"""
Create a new Session object.
"""
if not isinstance(timestamp, str):
error_str = "'timestamp' must be a string but is of type {}"
raise ValueError(error_str.format(type(timestamp)))
self.chunksize = chunksize
self.timestamp = timestamp
self.datasets = dict()
def __enter__(self):
"""Context manager enter."""
return self
def __exit__(self, etype, evalue, etraceback):
"""Context manager exit closes any open datasets."""
self.close()
def open_dataset(self,
dataset_path: Union[str, IO[bytes]],
mode: str,
name: str):
"""
Open a dataset with the given access mode.
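        Example (a sketch; the path and dataset name below are placeholders)::
            with Session() as s:
                src = s.open_dataset('/path/to/dataset.hdf5', 'r', 'src')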
:param dataset_path: the path to the dataset
:param mode: the mode in which the dataset should be opened. This is one of "r", "r+" or "w".
:param name: the name that is associated with this dataset. This can be used to retrieve the dataset when
calling :py:meth:`~session.Session.get_dataset`.
:return: The top-level dataset object
"""
h5py_modes = {"r": "r", "r+": "r+", "w": "w"}
if name in self.datasets:
raise ValueError("A dataset with name '{}' is already open, and must be closed first.".format(name))
self.datasets[name] = ds.HDF5Dataset(self, dataset_path, mode, name)
return self.datasets[name]
def close_dataset(self,
name: str):
"""
Close the dataset with the given name. If there is no dataset with that name, do nothing.
:param name: The name of the dataset to be closed
:return: None
"""
if name in self.datasets:
self.datasets[name].close()
del self.datasets[name]
def list_datasets(self):
"""
List the open datasets for this Session object. This is returned as a tuple of strings
rather than the datasets themselves. The individual datasets can be fetched using
:py:meth:`~exetera.session.Session.get_dataset`.
Example::
names = s.list_datasets()
datasets = [s.get_dataset(n) for n in names]
:return: A tuple containing the names of the currently open datasets for this Session object
"""
return tuple(n for n in self.datasets.keys())
def get_dataset(self,
name: str):
"""
Get the dataset with the given name. If there is no dataset with that name, raise a KeyError
indicating that the dataset with that name is not present.
:param name: Name of the dataset to be fetched. This is the name that was given to it
when it was opened through :py:meth:`~session.Session.open_dataset`.
:return: Dataset with that name.
"""
return self.datasets[name]
def close(self):
"""
Close all open datasets.
:return: None
"""
for v in self.datasets.values():
v.close()
self.datasets = dict()
def get_shared_index(self, keys: Tuple[np.ndarray]):
"""
Create a shared index based on a tuple of numpy arrays containing keys.
This function generates the sorted union of a tuple of key fields and
then maps the individual arrays to their corresponding indices in the
sorted union.
:param keys: a tuple of groups, fields or ndarrays whose contents represent keys
Example::
key_1 = ['a', 'b', 'e', 'g', 'i']
key_2 = ['<KEY>']
            key_3 = ['a', 'c', 'd', 'e', 'g', 'h', 'h', 'i']
sorted_union = ['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j']
key_1_index = [0, 1, 4, 5, 7]
key_2_index = [1, 1, 2, 2, 4, 5, 8]
key_3_index = [0, 2, 3, 4, 5, 6, 6, 7]
"""
if not isinstance(keys, tuple):
raise ValueError("'keys' must be a tuple")
concatted = None
raw_keys = list()
for k in keys:
raw_field = val.raw_array_from_parameter(self, 'keys', k)
raw_keys.append(raw_field)
if concatted is None:
concatted = pd.unique(raw_field)
else:
concatted = np.concatenate((concatted, raw_field), axis=0)
concatted = pd.unique(concatted)
concatted = np.sort(concatted)
return tuple(np.searchsorted(concatted, k) for k in raw_keys)
def set_timestamp(self,
timestamp: str = str(datetime.now(timezone.utc))):
"""
Set the default timestamp to be used when creating fields without specifying
an explicit timestamp.
:param timestamp: a string representing a valid Datetime
:return: None
"""
if not isinstance(timestamp, str):
error_str = "'timestamp' must be a string but is of type {}"
raise ValueError(error_str.format(type(timestamp)))
self.timestamp = timestamp
def sort_on(self,
src_group: h5py.Group,
dest_group: h5py.Group,
keys: Union[tuple, list],
timestamp=datetime.now(timezone.utc), write_mode='write', verbose=True):
"""
Sort a group (src_group) of fields by the specified set of keys, and write the
sorted fields to dest_group.
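        Example (a sketch; the group and key names below are assumed, not prescribed)::
            # sort every field in src['foo'] by 'id', then 'timestamp', writing to dest['foo']
            s.sort_on(src['foo'], dest['foo'], ('id', 'timestamp'))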
:param src_group: the group of fields that are to be sorted
:param dest_group: the group into which sorted fields are written
:param keys: fields to sort on
:param timestamp: optional - timestamp to write on the sorted fields
:param write_mode: optional - write mode to use if the destination fields already exist
:return: None
"""
# TODO: fields is being ignored at present
def print_if_verbose(*args):
if verbose:
print(*args)
readers = tuple(self.get(src_group[f]) for f in keys)
t1 = time.time()
sorted_index = self.dataset_sort_index(
readers, np.arange(len(readers[0].data), dtype=np.uint32))
print_if_verbose(f'sorted {keys} index in {time.time() - t1}s')
t0 = time.time()
for k in src_group.keys():
t1 = time.time()
if src_group != dest_group:
r = self.get(src_group[k])
w = r.create_like(dest_group, k, timestamp)
self.apply_index(sorted_index, r, w)
del r
del w
else:
r = self.get(src_group[k]).writeable()
if r.indexed:
i, v = self.apply_index(sorted_index, r)
r.indices[:] = i
r.values[:] = v
else:
r.data[:] = self.apply_index(sorted_index, r)
del r
print_if_verbose(f" '{k}' reordered in {time.time() - t1}s")
print_if_verbose(f"fields reordered in {time.time() - t0}s")
def dataset_sort_index(self, sort_indices, index=None):
"""
Generate a sorted index based on a set of fields upon which to sort and an optional
index to apply to the sort_indices.
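        Example (a sketch; 'a' and 'b' stand for two key fields of the same type and length)::
            sorted_index = s.dataset_sort_index((a, b))
            a_sorted = s.apply_index(sorted_index, a)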
:param sort_indices: a tuple or list of indices that determine the sorted order
:param index: optional - the index by which the initial field should be permuted
:return: the resulting index that can be used to permute unsorted fields
"""
val._check_all_readers_valid_and_same_type(sort_indices)
r_readers = tuple(reversed(sort_indices))
raw_data = val.raw_array_from_parameter(self, 'readers', r_readers[0])
if index is None:
raw_index = np.arange(len(raw_data))
else:
raw_index = val.raw_array_from_parameter(self, 'index', index)
acc_index = raw_index
fdata = raw_data[acc_index]
index = np.argsort(fdata, kind='stable')
acc_index = acc_index[index]
for r in r_readers[1:]:
raw_data = val.raw_array_from_parameter(self, 'readers', r)
fdata = raw_data[acc_index]
index = np.argsort(fdata, kind='stable')
acc_index = acc_index[index]
return acc_index
def apply_filter(self, filter_to_apply, src, dest=None):
"""
        Apply a filter to a src field. The filtered field is written to dest if it is set,
and returned from the function call. If the field is an IndexedStringField, the
indices and values are returned separately.
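        Example (a sketch; 'ages' stands for any numeric Field)::
            over_18 = ages.data[:] > 18
            filtered = s.apply_filter(over_18, ages)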
:param filter_to_apply: the filter to be applied to the source field, an array of boolean
:param src: the field to be filtered
:param dest: optional - a field to write the filtered data to
:return: the filtered values
"""
filter_to_apply_ = val.array_from_parameter(self, 'index_to_apply', filter_to_apply)
writer_ = None
if dest is not None:
writer_ = val.field_from_parameter(self, 'writer', dest)
if isinstance(src, Field):
newfld = src.apply_filter(filter_to_apply_, writer_)
if src.indexed:
return newfld.indices[:], newfld.values[:]
else:
return newfld.data[:]
# elif isinstance(src, df.datafrme):
else:
reader_ = val.array_from_parameter(self, 'reader', src)
            result = reader_[filter_to_apply_]
if writer_:
writer_.data.write(result)
return result
def apply_index(self, index_to_apply, src, dest=None):
"""
        Apply an index to a src field. The indexed field is written to dest if it is set,
and returned from the function call. If the field is an IndexedStringField, the
indices and values are returned separately.
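        Example (a sketch; 'ages' stands for any numeric Field)::
            order = np.argsort(ages.data[:])
            reordered = s.apply_index(order, ages)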
:param index_to_apply: the index to be applied to the source field, must be one of Group, Field, or ndarray
:param src: the field to be index
:param dest: optional - a field to write the indexed data to
:return: the indexed values
"""
index_to_apply_ = val.array_from_parameter(self, 'index_to_apply', index_to_apply)
writer_ = None
if dest is not None:
writer_ = val.field_from_parameter(self, 'writer', dest)
if isinstance(src, Field):
newfld = src.apply_index(index_to_apply_, writer_)
if src.indexed:
return newfld.indices[:], newfld.values[:]
else:
return newfld.data[:]
# if src.indexed:
# dest_indices, dest_values = \
# ops.apply_indices_to_index_values(index_to_apply_,
# src.indices[:], src.values[:])
# return dest_indices, dest_values
# elif isinstance(src, Field):
# newfld = src.apply_index(index_to_apply_, writer_)
# return newfld.data[:]
else:
reader_ = val.array_from_parameter(self, 'reader', src)
            result = reader_[index_to_apply_]
if writer_:
writer_.data.write(result)
return result
def distinct(self, field=None, fields=None, filter=None):
        if field is None and fields is None:
            raise ValueError("One of 'field' and 'fields' must be set")
        if field is not None and fields is not None:
            raise ValueError("Only one of 'field' and 'fields' may be set")
if field is not None:
return np.unique(field)
entries = [(f'{i}', f.dtype) for i, f in enumerate(fields)]
unified = np.empty_like(fields[0], dtype=np.dtype(entries))
for i, f in enumerate(fields):
unified[f'{i}'] = f
uniques = np.unique(unified)
results = [uniques[f'{i}'] for i in range(len(fields))]
return results
def get_spans(self, field: Union[Field, np.ndarray] = None,
dest: Field = None, **kwargs):
"""
Calculate a set of spans that indicate contiguous equal values.
The entries in the result array correspond to the inclusive start and
exclusive end of the span (the ith span is represented by element i and
element i+1 of the result array). The last entry of the result array is
the length of the source field.
Only one of 'field' or 'fields' may be set. If 'fields' is used and more
than one field specified, the fields are effectively zipped and the check
for spans is carried out on each corresponding tuple in the zipped field.
Example::
field: [1, 2, 2, 1, 1, 1, 3, 4, 4, 4, 2, 2, 2, 2, 2]
result: [0, 1, 3, 6, 7, 10, 15]
:param field: A Field or numpy array to be evaluated for spans
:param dest: A destination Field to store the result
:param \*\*kwargs: See below. For parameters set in both argument and kwargs, use kwargs
:Keyword Arguments:
* field -- Similar to field parameter, in case user specify field as keyword
* fields -- A tuple of Fields or tuple of numpy arrays to be evaluated for spans
* dest -- Similar to dest parameter, in case user specify as keyword
:return: The resulting set of spans as a numpy array
"""
fields = []
result = None
if len(kwargs) > 0:
for k in kwargs.keys():
if k == 'field':
field = kwargs[k]
elif k == 'fields':
fields = kwargs[k]
elif k == 'dest':
dest = kwargs[k]
if dest is not None and not isinstance(dest, Field):
raise TypeError(f"'dest' must be one of 'Field' but is {type(dest)}")
if field is not None:
if isinstance(field, Field):
result = field.get_spans()
elif isinstance(field, np.ndarray):
result = ops.get_spans_for_field(field)
elif len(fields) > 0:
if isinstance(fields[0], Field):
result = ops._get_spans_for_2_fields_by_spans(fields[0].get_spans(), fields[1].get_spans())
elif isinstance(fields[0], np.ndarray):
result = ops._get_spans_for_2_fields(fields[0], fields[1])
else:
raise ValueError("One of 'field' and 'fields' must be set")
if dest is not None:
dest.data.write(result)
return dest
else:
return result
def _apply_spans_no_src(self,
predicate: Callable[[np.ndarray, np.ndarray], None],
spans: np.ndarray,
dest: Field = None) -> np.ndarray:
"""
An implementation method for span applications that are carried out on the spans themselves rather than a target
field.
:param predicate: a predicate function that carries out the operation on the spans and produces the result
:param spans: the numpy array of spans to be applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
assert (dest is None or isinstance(dest, Field))
if dest is not None:
dest_f = val.field_from_parameter(self, 'dest', dest)
results = np.zeros(len(spans) - 1, dtype=dest_f.data.dtype)
predicate(spans, results)
dest_f.data.write(results)
return results
else:
results = np.zeros(len(spans) - 1, dtype='int64')
predicate(spans, results)
return results
def _apply_spans_src(self,
predicate: Callable[[np.ndarray, np.ndarray, np.ndarray], None],
spans: np.ndarray,
target: np.ndarray,
dest: Field = None) -> np.ndarray:
"""
An implementation method for span applications that are carried out on a target field.
:param predicate: a predicate function that carries out the operation on the spans and a target field, and
produces the result
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans and predicate are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
assert (dest is None or isinstance(dest, Field))
target_ = val.array_from_parameter(self, 'target', target)
        if len(target_) != spans[-1]:
            error_msg = ("'target' (length {}) must cover the full range described by 'spans' "
                         "(spans[-1] = {})")
            raise ValueError(error_msg.format(len(target_), spans[-1]))
if dest is not None:
dest_f = val.field_from_parameter(self, 'dest', dest)
results = np.zeros(len(spans) - 1, dtype=dest_f.data.dtype)
predicate(spans, target_, results)
dest_f.data.write(results)
return results
else:
results = np.zeros(len(spans) - 1, dtype=target_.dtype)
predicate(spans, target_, results)
return results
def apply_spans_index_of_min(self,
spans: np.ndarray,
target: np.ndarray,
dest: Field = None):
"""
Finds the index of the minimum value within each span on a target field.
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_src(ops.apply_spans_index_of_min, spans, target, dest)
def apply_spans_index_of_max(self,
spans: np.ndarray,
target: np.ndarray,
dest: Field = None):
"""
Finds the index of the maximum value within each span on a target field.
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_src(ops.apply_spans_index_of_max, spans, target, dest)
def apply_spans_index_of_first(self,
spans: np.ndarray,
dest: Field = None):
"""
Finds the index of the first entry within each span.
:param spans: the numpy array of spans to be applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_no_src(ops.apply_spans_index_of_first, spans, dest)
def apply_spans_index_of_last(self,
spans: np.ndarray,
dest: Field = None):
"""
Finds the index of the last entry within each span.
:param spans: the numpy array of spans to be applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_no_src(ops.apply_spans_index_of_last, spans, dest)
def apply_spans_count(self,
spans: np.ndarray,
dest: Field = None):
"""
Finds the number of entries within each span.
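        Example (a sketch; the spans shown are illustrative)::
            spans = np.asarray([0, 3, 5, 6])
            counts = s.apply_spans_count(spans)  # -> [3, 2, 1]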
:param spans: the numpy array of spans to be applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_no_src(ops.apply_spans_count, spans, dest)
def apply_spans_min(self,
spans: np.ndarray,
target: np.ndarray,
dest: Field = None):
"""
Finds the minimum value within span on a target field.
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_src(ops.apply_spans_min, spans, target, dest)
def apply_spans_max(self,
spans: np.ndarray,
target: np.ndarray,
dest: Field = None):
"""
Finds the maximum value within each span on a target field.
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_src(ops.apply_spans_max, spans, target, dest)
def apply_spans_first(self,
spans: np.ndarray,
target: np.ndarray,
dest: Field = None):
"""
Finds the first entry within each span on a target field.
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_src(ops.apply_spans_first, spans, target, dest)
def apply_spans_last(self,
spans: np.ndarray,
target: np.ndarray,
dest: Field = None):
"""
Finds the last entry within each span on a target field.
:param spans: the numpy array of spans to be applied
:param target: the field to which the spans are applied
:param dest: if set, the field to which the results are written
:returns: A numpy array containing the resulting values
"""
return self._apply_spans_src(ops.apply_spans_last, spans, target, dest)
def apply_spans_concat(self,
spans,
target,
dest,
src_chunksize=None,
dest_chunksize=None,
chunksize_mult=None):
if not target.indexed:
raise ValueError(f"'target' must be one of 'IndexedStringField' but is {type(target)}")
if not dest.indexed:
raise ValueError(f"'dest' must be one of 'IndexedStringField' but is {type(dest)}")
src_chunksize = target.chunksize if src_chunksize is None else src_chunksize
dest_chunksize = dest.chunksize if dest_chunksize is None else dest_chunksize
chunksize_mult = 16 if chunksize_mult is None else chunksize_mult
src_index = target.indices[:]
src_values = target.values[:]
dest_index = np.zeros(src_chunksize, src_index.dtype)
dest_values = np.zeros(dest_chunksize * chunksize_mult, src_values.dtype)
max_index_i = src_chunksize
max_value_i = dest_chunksize * chunksize_mult // 2
if src_values.dtype == 'S1':
separator = b','
delimiter = b'"'
elif src_values.dtype == np.uint8:
separator = np.frombuffer(b',', dtype='S1')[0][0]
delimiter = np.frombuffer(b'"', dtype='S1')[0][0]
s = 0
index_v = 0
while s < len(spans) - 1:
# s, index_i, index_v = per._apply_spans_concat(spans, src_index, src_values,
# dest_index, dest_values,
# max_index_i, max_value_i, s,
# separator, delimiter)
s, index_i, index_v = per._apply_spans_concat_2(spans, src_index, src_values,
dest_index, dest_values,
max_index_i, max_value_i,
separator, delimiter, s, index_v)
if index_i > 0 or index_v > 0:
dest.indices.write_part(dest_index[:index_i])
dest.values.write_part(dest_values[:index_v])
dest.indices.complete()
dest.values.complete()
# dest.write_raw(dest_index[:index_i], dest_values[:index_v])
# dest.complete()
def _aggregate_impl(self, predicate, index, target=None, dest=None):
"""
An implementation method for aggregation of fields via various predicates. This method takes a predicate that
defines an operation to be carried out, an 'index' array or field that determines the groupings over which the
predicate applies, and a 'target' array or field that the operation is carried out upon, should a target be
needed. If a 'dest' Field is supplied, the results will be written to it.
:param predicate: a predicate function that carries out the operation on the spans and produces the result
:param index: A numpy array or field representing the sub-ranges that can be aggregated
:param target: A numpy array upon which the operation is required. This only needs to be set for certain
operations.
:param dest: If set, the Field to which the results are written
:returns: A numpy array containing the resulting values
"""
index_ = val.raw_array_from_parameter(self, "index", index)
dest_field = None
if dest is not None:
dest_field = val.field_from_parameter(self, "dest", dest)
fkey_index_spans = self.get_spans(field=index)
# execute the predicate (note that not every predicate requires a target)
if target is None:
results = predicate(fkey_index_spans, dest_field)
else:
results = predicate(fkey_index_spans, target, dest_field)
return dest if dest is not None else results
def aggregate_count(self, index, dest=None):
"""
Finds the number of entries within each sub-group of index.
Example::
Index: a a a b b x a c c d d d
Result: 3 2 1 1 2 3
:param index: A numpy array or Field containing the index that defines the ranges over which count is applied.
:param dest: If set, a Field to which the resulting counts are written
:returns: A numpy array containing the resulting values
"""
return self._aggregate_impl(self.apply_spans_count, index, None, dest)
def aggregate_first(self, index, target=None, dest=None):
"""
Finds the first entries within each sub-group of index.
        Example::
Index: a a a b b x a c c d d d
Target: 1 2 3 4 5 6 7 8 9 0 1 2
Result: 1 4 6 7 8 0
:param index: A numpy array or Field containing the index that defines the ranges over which count is applied.
:param target: A numpy array to which the index and predicate are applied
:param dest: If set, a Field to which the resulting counts are written
:returns: A numpy array containing the resulting values
"""
return self.aggregate_custom(self.apply_spans_first, index, target, dest)
def aggregate_last(self, index, target=None, dest=None):
"""
Finds the first entries within each sub-group of index.
Example::
Index: a a a b b x a c c d d d
Target: 1 2 3 4 5 6 7 8 9 0 1 2
Result: 3 5 6 7 9 2
:param index: A numpy array or Field containing the index that defines the ranges over which count is applied.
:param target: A numpy array to which the index and predicate are applied
:param dest: If set, a Field to which the resulting counts are written
:returns: A numpy array containing the resulting values
"""
return self.aggregate_custom(self.apply_spans_last, index, target, dest)
def aggregate_min(self, index, target=None, dest=None):
"""
Finds the minimum value within each sub-group of index.
Example::
Index: a a a b b x a c c d d d
Target: 1 2 3 5 4 6 7 8 9 2 1 0
Result: 1 4 6 7 8 0
:param index: A numpy array or Field containing the index that defines the ranges over which min is applied.
:param target: A numpy array to which the index and predicate are applied
:param dest: If set, a Field to which the resulting counts are written
:returns: A numpy array containing the resulting values
"""
return self.aggregate_custom(self.apply_spans_min, index, target, dest)
def aggregate_max(self, index, target=None, dest=None):
"""
Finds the maximum value within each sub-group of index.
        Example::
Index: a a a b b x a c c d d d
Target: 1 2 3 5 4 6 7 8 9 2 1 0
Result: 3 5 6 7 9 2
:param index: A numpy array or Field containing the index that defines the ranges over which max is applied.
:param target: A numpy array to which the index and predicate are applied
:param dest: If set, a Field to which the resulting counts are written
:returns: A numpy array containing the resulting values
"""
return self.aggregate_custom(self.apply_spans_max, index, target, dest)
def aggregate_custom(self, predicate, index, target=None, dest=None):
if target is None:
            raise ValueError("'target' must not be None")
        val.ensure_valid_field_like("target", target)
if dest is not None:
val.ensure_valid_field("dest", dest)
return self._aggregate_impl(predicate, index, target, dest)
def join(self,
destination_pkey, fkey_indices, values_to_join,
writer=None, fkey_index_spans=None):
"""
This method is due for removal and should not be used.
Please use the merge or ordered_merge functions instead.
"""
if isinstance(destination_pkey, Field) and destination_pkey.indexed:
raise ValueError("'destination_pkey' must not be an indexed string field")
if isinstance(fkey_indices, Field) and fkey_indices.indexed:
raise ValueError("'fkey_indices' must not be an indexed string field")
if isinstance(values_to_join, rw.IndexedStringReader):
raise ValueError("Joins on indexed string fields are not supported")
raw_fkey_indices = val.raw_array_from_parameter(self, "fkey_indices", fkey_indices)
raw_values_to_join = val.raw_array_from_parameter(self, "values_to_join", values_to_join)
# generate spans for the sorted key indices if not provided
if fkey_index_spans is None:
fkey_index_spans = self.get_spans(field=raw_fkey_indices)
# select the foreign keys from the start of each span to get an ordered list
# of unique id indices in the destination space that the results of the predicate
# execution are mapped to
unique_fkey_indices = raw_fkey_indices[fkey_index_spans[:-1]]
# generate a filter to remove invalid foreign key indices (where values in the
# foreign key don't map to any values in the destination space
invalid_filter = unique_fkey_indices < operations.INVALID_INDEX
safe_unique_fkey_indices = unique_fkey_indices[invalid_filter]
# the predicate results are in the same space as the unique_fkey_indices, which
# means they may still contain invalid indices, so filter those now
safe_values_to_join = raw_values_to_join[invalid_filter]
# now get the memory that the results will be mapped to
# destination_space_values = writer.chunk_factory(len(destination_pkey))
destination_space_values = np.zeros(len(destination_pkey), dtype=raw_values_to_join.dtype)
# finally, map the results from the source space to the destination space
destination_space_values[safe_unique_fkey_indices] = safe_values_to_join
if writer is not None:
writer.data.write(destination_space_values)
else:
return destination_space_values
def predicate_and_join(self,
predicate, destination_pkey, fkey_indices,
reader=None, writer=None, fkey_index_spans=None):
"""
This method is due for removal and should not be used.
Please use the merge or ordered_merge functions instead.
"""
if reader is not None:
if not isinstance(reader, rw.Reader):
raise ValueError(f"'reader' must be a type of Reader but is {type(reader)}")
if isinstance(reader, rw.IndexedStringReader):
raise ValueError(f"Joins on indexed string fields are not supported")
# generate spans for the sorted key indices if not provided
if fkey_index_spans is None:
fkey_index_spans = self.get_spans(field=fkey_indices)
# select the foreign keys from the start of each span to get an ordered list
# of unique id indices in the destination space that the results of the predicate
# execution are mapped to
unique_fkey_indices = fkey_indices[:][fkey_index_spans[:-1]]
# generate a filter to remove invalid foreign key indices (where values in the
# foreign key don't map to any values in the destination space
invalid_filter = unique_fkey_indices < operations.INVALID_INDEX
safe_unique_fkey_indices = unique_fkey_indices[invalid_filter]
# execute the predicate (note that not every predicate requires a reader)
if reader is not None:
dtype = reader.dtype()
else:
dtype = np.uint32
results = np.zeros(len(fkey_index_spans) - 1, dtype=dtype)
predicate(fkey_index_spans, reader, results)
# the predicate results are in the same space as the unique_fkey_indices, which
# means they may still contain invalid indices, so filter those now
safe_results = results[invalid_filter]
# now get the memory that the results will be mapped to
destination_space_values = writer.chunk_factory(len(destination_pkey))
# finally, map the results from the source space to the destination space
destination_space_values[safe_unique_fkey_indices] = safe_results
writer.write(destination_space_values)
def get(self,
field: Union[Field, h5py.Group]):
"""
Get a Field from a h5py Group.
Example::
# this code for context
with Session() as s:
# open a dataset about wildlife
src = s.open_dataset("/my/wildlife/dataset.hdf5", "r", "src")
# fetch the group containing bird data
birds = src['birds']
# get the bird decibel field
bird_decibels = s.get(birds['decibels'])
:param field: The Field or Group object to retrieve.
"""
if isinstance(field, Field):
return field
if 'fieldtype' not in field.attrs.keys():
raise ValueError(f"'{field}' is not a well-formed field")
fieldtype_map = {
'indexedstring': fld.IndexedStringField,
'fixedstring': fld.FixedStringField,
'categorical': fld.CategoricalField,
'boolean': fld.NumericField,
'numeric': fld.NumericField,
'datetime': fld.TimestampField,
'date': fld.TimestampField,
'timestamp': fld.TimestampField
}
fieldtype = field.attrs['fieldtype'].split(',')[0]
return fieldtype_map[fieldtype](self, field, None, field.name)
def create_like(self, field, dest_group, dest_name, timestamp=None, chunksize=None):
"""
Create a field of the same type as an existing field, in the location and with the name provided.
Example::
with Session as s:
...
a = s.get(table_1['a'])
b = s.create_like(a, table_2, 'a_times_2')
b.data.write(a.data[:] * 2)
:param field: The Field whose type is to be copied
:param dest_group: The group in which the new field should be created
:param dest_name: The name of the new field
"""
if isinstance(field, h5py.Group):
if 'fieldtype' not in field.attrs.keys():
raise ValueError("{} is not a well-formed field".format(field))
f = self.get(field)
return f.create_like(dest_group, dest_name)
elif isinstance(field, Field):
return field.create_like(dest_group, dest_name)
else:
raise ValueError("'field' must be either a Field or a h5py.Group, but is {}".format(type(field)))
def create_indexed_string(self, group, name, timestamp=None, chunksize=None):
"""
Create an indexed string field in the given DataFrame with the given name.
:param group: The group in which the new field should be created
:param name: The name of the new field
:param timestamp: If set, the timestamp that should be given to the new field. If not set
datetime.now() is used.
:param chunksize: If set, the chunksize that should be used to create the new field. In general, this should
not be set unless you are writing unit tests.
"""
if not isinstance(group, (df.DataFrame, h5py.Group)):
if isinstance(group, ds.Dataset):
raise ValueError("'group' must be an ExeTera DataFrame rather than a"
" top-level Dataset")
else:
raise ValueError("'group' must be an Exetera DataFrame but a "
"{} was passed to it".format(type(group)))
if isinstance(group, h5py.Group):
fld.indexed_string_field_constructor(self, group, name, timestamp, chunksize)
return fld.IndexedStringField(self, group[name], None, write_enabled=True)
else:
return group.create_indexed_string(name, timestamp, chunksize)
def create_fixed_string(self, group, name, length, timestamp=None, chunksize=None):
"""
Create a fixed string field in the given DataFrame, given name, and given max string length per entry.
:param group: The group in which the new field should be created
:param name: The name of the new field
:param length: The maximum length in bytes that each entry can have.
:param timestamp: If set, the timestamp that should be given to the new field. If not set
datetime.now() is used.
:param chunksize: If set, the chunksize that should be used to create the new field. In general, this should
not be set unless you are writing unit tests.
"""
if not isinstance(group, (df.DataFrame, h5py.Group)):
if isinstance(group, ds.Dataset):
raise ValueError("'group' must be an ExeTera DataFrame rather than a"
" top-level Dataset")
else:
raise ValueError("'group' must be an Exetera DataFrame but a "
"{} was passed to it".format(type(group)))
if isinstance(group, h5py.Group):
fld.fixed_string_field_constructor(self, group, name, length, timestamp, chunksize)
return fld.FixedStringField(self, group[name], None, write_enabled=True)
else:
return group.create_fixed_string(name, length, timestamp, chunksize)
def create_categorical(self, group, name, nformat, key, timestamp=None, chunksize=None):
"""
Create a categorical field in the given DataFrame with the given name. This function also takes a numerical
        format for the numeric representation of the categories, and a key that maps numeric values to their
        string descriptions.
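        Example (a sketch; the dataframe 'dst_df', field name and key are illustrative)::
            symptom = s.create_categorical(dst_df, 'symptom', 'int8', {'no': 0, 'yes': 1})
            symptom.data.write(np.asarray([0, 1, 1, 0], dtype='int8'))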
:param group: The group in which the new field should be created
:param name: The name of the new field
:param nformat: A numerical type in the set (int8, uint8, int16, uint18, int32, uint32, int64). It is
recommended to use 'int8'.
:param key: A dictionary that maps numerical values to their string representations
:param timestamp: If set, the timestamp that should be given to the new field. If not set
datetime.now() is used.
:param chunksize: If set, the chunksize that should be used to create the new field. In general, this should
not be set unless you are writing unit tests.
"""
if not isinstance(group, (df.DataFrame, h5py.Group)):
if isinstance(group, ds.Dataset):
raise ValueError("'group' must be an ExeTera DataFrame rather than a"
" top-level Dataset")
else:
raise ValueError("'group' must be an Exetera DataFrame but a "
"{} was passed to it".format(type(group)))
if isinstance(group, h5py.Group):
fld.categorical_field_constructor(self, group, name, nformat, key, timestamp, chunksize)
return fld.CategoricalField(self, group[name], None, write_enabled=True)
else:
return group.create_categorical(name, nformat, key, timestamp, chunksize)
def create_numeric(self, group, name, nformat, timestamp=None, chunksize=None):
"""
Create a numeric field in the given DataFrame with the given name.
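        Example (a sketch; the dataframe 'dst_df' and field name are illustrative)::
            age = s.create_numeric(dst_df, 'age', 'int32')
            age.data.write(np.asarray([21, 40, 35], dtype='int32'))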
:param group: The group in which the new field should be created
:param name: The name of the new field
:param nformat: A numerical type in the set (int8, uint8, int16, uint18, int32, uint32, int64, uint64,
float32, float64). It is recommended to avoid uint64 as certain operations in numpy cause conversions to
floating point values.
:param timestamp: If set, the timestamp that should be given to the new field. If not set
datetime.now() is used.
:param chunksize: If set, the chunksize that should be used to create the new field. In general, this should
not be set unless you are writing unit tests.
"""
if not isinstance(group, (df.DataFrame, h5py.Group)):
if isinstance(group, ds.Dataset):
raise ValueError("'group' must be an ExeTera DataFrame rather than a"
" top-level Dataset")
else:
raise ValueError("'group' must be an Exetera DataFrame but a "
"{} was passed to it".format(type(group)))
if isinstance(group, h5py.Group):
fld.numeric_field_constructor(self, group, name, nformat, timestamp, chunksize)
return fld.NumericField(self, group[name], None, write_enabled=True)
else:
return group.create_numeric(name, nformat, timestamp, chunksize)
def create_timestamp(self, group, name, timestamp=None, chunksize=None):
"""
Create a timestamp field in the given group with the given name.
"""
if not isinstance(group, (df.DataFrame, h5py.Group)):
if isinstance(group, ds.Dataset):
raise ValueError("'group' must be an ExeTera DataFrame rather than a"
" top-level Dataset")
else:
raise ValueError("'group' must be an Exetera DataFrame but a "
"{} was passed to it".format(type(group)))
if isinstance(group, h5py.Group):
fld.timestamp_field_constructor(self, group, name, timestamp, chunksize)
return fld.TimestampField(self, group[name], None, write_enabled=True)
else:
return group.create_timestamp(name, timestamp, chunksize)
def get_or_create_group(self,
group: Union[h5py.Group, h5py.File],
name: str):
"""
Note: this function is deprecated, and provided only for compatibility with existing scripts.
It will be removed in a future version.
"""
if name in group:
return group[name]
return group.create_group(name)
def chunks(self,
length: int,
chunksize: Optional[int] = None):
"""
Note: this function is deprecated, and provided only for compatibility with existing scripts.
It will be removed in a future version.
'chunks' is a convenience method that, given an overall length and a chunksize, will yield
a set of ranges for the chunks in question.
ie.
chunks(1048576, 500000) -> (0, 500000), (500000, 1000000), (1000000, 1048576)
:param length: The range to be split into chunks
:param chunksize: Optional parameter detailing the size of each chunk. If not set, the
chunksize that the Session was initialized with is used.
"""
if chunksize is None:
chunksize = self.chunksize
cur = 0
while cur < length:
next = min(length, cur + chunksize)
yield cur, next
cur = next
# def process(self,
# inputs,
# outputs,
# predicate):
# """
# Note: this function is deprecated, and provided only for compatibility with existing scripts.
# It will be removed in a future version.
# """
#
# # TODO: modifying the dictionaries in place is not great
# input_readers = dict()
# for k, v in inputs.items():
# if isinstance(v, fld.Field):
# input_readers[k] = v
# else:
# input_readers[k] = self.get(v)
# output_writers = dict()
# output_arrays = dict()
# for k, v in outputs.items():
# if isinstance(v, fld.Field):
# output_writers[k] = v
# else:
# raise ValueError("'outputs': all values must be 'Writers'")
#
# reader = next(iter(input_readers.values()))
# input_length = len(reader)
# writer = next(iter(output_writers.values()))
# chunksize = writer.chunksize
# required_chunksize = min(input_length, chunksize)
# for k, v in outputs.items():
# output_arrays[k] = output_writers[k].chunk_factory(required_chunksize)
#
# for c in self.chunks(input_length, chunksize):
# kwargs = dict()
#
# for k, v in inputs.items():
# kwargs[k] = v.data[c[0]:c[1]]
# for k, v in output_arrays.items():
# kwargs[k] = v.data[:c[1] - c[0]]
# predicate(**kwargs)
#
# # TODO: write back to the writer
# for k in output_arrays.keys():
# output_writers[k].data.write_part(kwargs[k])
# for k, v in output_writers.items():
# output_writers[k].data.complete()
def get_index(self, target, foreign_key, destination=None):
"""
Note: this function is deprecated, and provided only for compatibility with existing scripts.
It will be removed in a future version.
Please make use of Dataframe.merge functionality instead. This method can be emulated by
adding an index (via np.arange) to a dataframe, performing a merge and then fetching the
mapped index field.
'get_index' maps a primary key ('target') into the space of a foreign key ('foreign_key').
"""
print(' building patient_id index')
t0 = time.time()
target_lookup = dict()
target_ = val.raw_array_from_parameter(self, "target", target)
for i, v in enumerate(target_):
target_lookup[v] = i
print(f' target lookup built in {time.time() - t0}s')
print(' perform initial index')
t0 = time.time()
foreign_key_elems = val.raw_array_from_parameter(self, "foreign_key", foreign_key)
# foreign_key_index = np.asarray([target_lookup.get(i, -1) for i in foreign_key_elems],
# dtype=np.int64)
foreign_key_index = np.zeros(len(foreign_key_elems), dtype=np.int64)
current_invalid = np.int64(operations.INVALID_INDEX)
for i_k, k in enumerate(foreign_key_elems):
index = target_lookup.get(k, current_invalid)
if index >= operations.INVALID_INDEX:
current_invalid += 1
target_lookup[k] = index
foreign_key_index[i_k] = index
print(f' initial index performed in {time.time() - t0}s')
if destination is not None:
if val.is_field_parameter(destination):
destination.data.write(foreign_key_index)
else:
destination[:] = foreign_key_index
else:
return foreign_key_index
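    # Conceptual sketch of the suggested DataFrame.merge-based replacement
    # (hypothetical names, written with pandas purely to illustrate mapping a
    # foreign key into the index space of a primary key):
    #
    #   left = pd.DataFrame({'key': target_keys,
    #                        'index': np.arange(len(target_keys))})
    #   right = pd.DataFrame({'key': foreign_keys})
    #   mapped_index = right.merge(left, on='key', how='left')['index']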
def temp_filename(self):
uid = str(uuid.uuid4())
while os.path.exists(uid + '.hdf5'):
uid = str(uuid.uuid4())
return uid + '.hdf5'
def merge_left(self, left_on, right_on,
right_fields=tuple(), right_writers=None):
"""
Note: this function is deprecated, and provided only for compatibility with existing scripts.
It will be removed in a future version.
Please use DataFrame.merge instead.
Perform a database-style left join on right_fields, outputting the result to right_writers, if set.
        :param left_on: The left-hand-side key to perform the join on
        :param right_on: The right-hand-side key to perform the join on
:param right_fields: The fields to be mapped from right to left
:param right_writers: Optional parameter providing the fields to which the mapped data should
be written. If this is not set, the mapped data is returned as numpy arrays and lists instead.
"""
l_key_raw = val.raw_array_from_parameter(self, 'left_on', left_on)
l_index = np.arange(len(l_key_raw), dtype=np.int64)
l_df = pd.DataFrame({'l_k': l_key_raw, 'l_index': l_index})
r_key_raw = val.raw_array_from_parameter(self, 'right_on', right_on)
r_index = np.arange(len(r_key_raw), dtype=np.int64)
r_df = pd.DataFrame({'r_k': r_key_raw, 'r_index': r_index})
df = pd.merge(left=l_df, right=r_df, left_on='l_k', right_on='r_k', how='left')
r_to_l_map = df['r_index'].to_numpy(dtype=np.int64)
r_to_l_filt = np.logical_not(df['r_index'].isnull()).to_numpy()
right_results = list()
for irf, rf in enumerate(right_fields):
if isinstance(rf, Field):
if rf.indexed:
indices, values = ops.safe_map_indexed_values(rf.indices[:], rf.values[:],
r_to_l_map, r_to_l_filt)
if right_writers is None:
result = fld.IndexedStringMemField(self)
result.indices.write(indices)
result.values.write(values)
right_results.append(result)
else:
right_writers[irf].indices.write(indices)
right_writers[irf].values.write(values)
else:
values = ops.safe_map_values(rf.data[:], r_to_l_map, r_to_l_filt)
if right_writers is None:
result = rf.create_like()
result.data.write(values)
right_results.append(result)
else:
right_writers[irf].data.write(values)
else:
values = ops.safe_map_values(rf, r_to_l_map, r_to_l_filt)
if right_writers is None:
right_results.append(values)
else:
right_writers[irf].data.write(values)
return right_results
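    # Illustrative call (hypothetical field names): map 'weight' from the right-hand
    # table onto the rows of the left-hand table via matching patient ids.
    #
    #   mapped_weight, = s.merge_left(left_on=assessments['patient_id'],
    #                                 right_on=patients['id'],
    #                                 right_fields=(patients['weight'],))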
def merge_right(self, left_on, right_on,
left_fields=tuple(), left_writers=None):
"""
Note: this function is deprecated, and provided only for compatibility with existing scripts.
It will be removed in a future version.
Please use DataFrame.merge instead.
Perform a database-style right join on left_fields, outputting the result to left_writers, if set.
        :param left_on: The left-hand-side key to perform the join on
        :param right_on: The right-hand-side key to perform the join on
:param left_fields: The fields to be mapped from right to left
:param left_writers: Optional parameter providing the fields to which the mapped data should
be written. If this is not set, the mapped data is returned as numpy arrays and lists instead.
"""
l_key_raw = val.raw_array_from_parameter(self, 'left_on', left_on)
l_index = np.arange(len(l_key_raw), dtype=np.int64)
l_df = pd.DataFrame({'l_k': l_key_raw, 'l_index': l_index})
r_key_raw = val.raw_array_from_parameter(self, 'right_on', right_on)
r_index = np.arange(len(r_key_raw), dtype=np.int64)
r_df = pd.DataFrame({'r_k': r_key_raw, 'r_index': r_index})
df = pd.merge(left=r_df, right=l_df, left_on='r_k', right_on='l_k', how='left')
l_to_r_map = df['l_index'].to_numpy(dtype='int64')
l_to_r_filt = np.logical_not(df['l_index'].isnull()).to_numpy()
left_results = list()
for ilf, lf in enumerate(left_fields):
if isinstance(lf, Field):
if lf.indexed:
indices, values = ops.safe_map_indexed_values(lf.indices[:], lf.values[:],
l_to_r_map, l_to_r_filt)
if left_writers is None:
result = fld.IndexedStringMemField(self)
result.indices.write(indices)
result.values.write(values)
left_results.append(result)
else:
left_writers[ilf].indices.write(indices)
left_writers[ilf].values.write(values)
else:
values = ops.safe_map_values(lf.data[:], l_to_r_map, l_to_r_filt)
if left_writers is None:
result = lf.create_like()
result.data.write(values)
left_results.append(result)
else:
left_writers[ilf].data.write(values)
else:
values = ops.safe_map_values(lf, l_to_r_map, l_to_r_filt)
if left_writers is None:
left_results.append(values)
else:
left_writers[ilf].data.write(values)
return left_results
def merge_inner(self, left_on, right_on,
left_fields=None, left_writers=None, right_fields=None, right_writers=None):
"""
Note: this function is deprecated, and provided only for compatibility with existing scripts.
It will be removed in a future version.
Please use DataFrame.merge instead.
Perform a database-style inner join on left_fields, outputting the result to left_writers, if set.
        :param left_on: The left-hand-side key to perform the join on
        :param right_on: The right-hand-side key to perform the join on
:param left_fields: The fields to be mapped from left to inner
:param left_writers: Optional parameter providing the fields to which the mapped data should
be written. If this is not set, the mapped data is returned as numpy arrays and lists instead.
:param right_fields: The fields to be mapped from right to inner
:param right_writers: Optional parameter providing the fields to which the mapped data should
be written. If this is not set, the mapped data is returned as numpy arrays and lists instead.
"""
l_key_raw = val.raw_array_from_parameter(self, 'left_on', left_on)
l_index = np.arange(len(l_key_raw), dtype=np.int64)
l_df = pd.DataFrame({'l_k': l_key_raw, 'l_index': l_index})
r_key_raw = val.raw_array_from_parameter(self, 'right_on', right_on)
r_index = np.arange(len(r_key_raw), dtype=np.int64)
r_df = pd.DataFrame({'r_k': r_key_raw, 'r_index': r_index})
df = | pd.merge(left=l_df, right=r_df, left_on='l_k', right_on='r_k', how='inner') | pandas.merge |
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
        # check whether the string can be converted to a single color
maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be converted to a color
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
            # mpl will raise an error if any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
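# Illustrative behaviour of the helper above (private API, shown for reference):
#
#   _get_standard_colors(num_colors=3, color='red')        # -> ['red', 'red', 'red']
#   _get_standard_colors(num_colors=3, colormap='winter')  # three evenly spaced colormap samples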
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
    format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
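# Example of the option store defined above (illustrative only):
#
#   with plot_params.use('x_compat', True):
#       df.plot()            # drawn with x-axis compatibility enabled
#   plot_params['x_compat']  # -> False again outside the block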
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
            if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
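# Illustrative call (hypothetical DataFrame `flowers` with a 'species' class column):
#
#   ax = radviz(flowers, 'species', colormap='winter')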
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in | range(samples) | pandas.compat.range |
import os
import errno
import pandas as pd
import numpy as np
from tabulate import tabulate
from graphviz import Digraph
import untangle as ut
#########################################################################################################
class MultiAutomata(object):
'''
    Class for creating multiple automata from an XML file, saving them all into a dictionary keyed by
    their names in the file.
'''
def __init__(self, block_name):
self.__name = block_name
        self.__Automata = {}  # Dictionary containing all automata by name
def read_xml(self, file):
'''
Read all Automata in the file
'''
aut = ut.parse(file) #Automaton object
for a in aut.Automata.Automaton:
G = Automaton(a['name'])
G.read_xml(file, a['name'])
self.__Automata[a['name']] = G
def get_automata(self):
'''
Return a dictionary containing all automata by name
'''
return self.__Automata
def generate_calls(self):
'''
        Create the calls for all events and states present in the set of automata
'''
for a in self.__Automata.values():
a.gen_events_calls()
a.gen_states_calls()
a.gen_translation_table()
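# Illustrative usage of MultiAutomata (the file name below is hypothetical):
#
#   plant = MultiAutomata('plant_models')
#   plant.read_xml('models.xml')
#   plant.generate_calls()
#   automata = plant.get_automata()   # dict mapping automaton name -> Automaton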
#########################################################################################################
class Automaton(object):
'''
    Class with tools for dynamically building an Automaton and displaying it
aut_name = 'Name for your supervisor automata'
'''
def __init__(self, aut_name = 'automaton'):
self.__name = aut_name
self.__states = pd.DataFrame(columns=['node_id','initial','accepting']) # DataFrame containing states info
self.__events = | pd.DataFrame(columns=['event_id','controllable','transitions']) | pandas.DataFrame |
"""Finds significant mutation combinations."""
from microbepy.common import constants as cn
from microbepy.common import util
from microbepy.common.combination_iterator import CombinationIterator
from microbepy.data import util_data as ud
from microbepy.data.model_data_provider import ModelDataProvider
from microbepy.statistics.group_significance_level \
import GroupSignificanceLevel
import itertools
import numpy as np
import pandas as pd
##############################################
# Class
##############################################
class MutationCombination(object):
def __init__(self, mutation_context,
transform_type=cn.TRANSFORM_LOW_FREQUENCY_ISOLATES,
constraints=None, lines=None):
"""
:param MutationContext mutation_context:
:param str transform_type:
:param list-BooleanFunction constraints: no line constraints
:param list-str lines: Lines for which analysis is done
"""
self._lines = util.setNoneList(lines)
self._context = mutation_context
self._transform_type = transform_type
self._constraints = util.setNoneList(constraints)
@classmethod
def isMock(cls):
"""
Used for dependency injection.
"""
return False
@classmethod
def getFilePrefix(cls):
"""
Used for dependency injection.
"""
return ""
def do(self, max_combination, is_tstat=True,
is_resample=True,
excludes=None,
num_combinations=None,
lines=None):
"""
Searches combinations of mutations and reports their statistical
significance.
:param int max_combination: max mutations in a combination
:param bool is_tstat: report t statistic
:param bool is_resample: report resample statistic
:param list-object excludes: combinations to exclude
:param list-str lines: lines for which analysis is done
:param int num_combinations: maximum combinations computed
all if None
:return pd.DataFrame, pd.DataFrame: DFs for extremas df_min, df_max
cn.MUTATIONS,
cn.SL_TSTAT
cn.SL_RESAMPLE
cn.VALUE - value for the extrema
cn.COUNT - number of values in the extrema
cn.GROUP - group for the extrema in binary
cn.LINE
"""
def assignDF(sl, mutation_combination, line):
df = pd.DataFrame({
cn.AVG: [sl.avg],
cn.GROUP: [sl.group],
cn.COUNT: [sl.count],
cn.MUTATIONS: [mutation_combination],
cn.LINE: [line],
cn.SL_TSTAT: [sl.sl_tstat],
cn.SL_RESAMPLE: [sl.sl_resample],
})
return df
#
excludes = util.setNoneList(excludes)
# Get the mutations to consider
df_min = | pd.DataFrame() | pandas.DataFrame |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from typing import Any, Union
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
is_valid_operand_for_numeric_arithmetic,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef.typehints import as_spark_type, extension_dtypes, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import BooleanType, StringType
class BooleanOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with spark type: BooleanType.
"""
@property
def pretty_name(self) -> str:
return "bools"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError(
"Addition can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, bool):
return left.__or__(right)
elif isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left + right
else:
assert isinstance(right, IndexOpsMixin)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType):
return left.__or__(right)
else:
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left + right
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Subtraction can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left - right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left - right
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError(
"Multiplication can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, bool):
return left.__and__(right)
elif isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left * right
else:
assert isinstance(right, IndexOpsMixin)
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType):
return left.__and__(right)
else:
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left * right
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"True division can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left / right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left / right
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Floor division can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left // right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left // right
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Modulo can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left % right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left % right
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if not is_valid_operand_for_numeric_arithmetic(right, allow_bool=False):
raise TypeError(
"Exponentiation can not be applied to %s and the given type." % self.pretty_name
)
if isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return left ** right
else:
assert isinstance(right, IndexOpsMixin)
left = transform_boolean_operand_to_numeric(left, spark_type=right.spark.data_type)
return left ** right
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, bool):
return left.__or__(right)
elif isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right + left
else:
raise TypeError(
"Addition can not be applied to %s and the given type." % self.pretty_name
)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right - left
else:
raise TypeError(
"Subtraction can not be applied to %s and the given type." % self.pretty_name
)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, bool):
return left.__and__(right)
elif isinstance(right, numbers.Number):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right * left
else:
raise TypeError(
"Multiplication can not be applied to %s and the given type." % self.pretty_name
)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right / left
else:
raise TypeError(
"True division can not be applied to %s and the given type." % self.pretty_name
)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right // left
else:
raise TypeError(
"Floor division can not be applied to %s and the given type." % self.pretty_name
)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right ** left
else:
raise TypeError(
"Exponentiation can not be applied to %s and the given type." % self.pretty_name
)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, numbers.Number) and not isinstance(right, bool):
left = transform_boolean_operand_to_numeric(left, spark_type=as_spark_type(type(right)))
return right % left
else:
raise TypeError(
"Modulo can not be applied to %s and the given type." % self.pretty_name
)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, IndexOpsMixin) and isinstance(right.dtype, extension_dtypes):
return right.__and__(left)
else:
def and_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if pd.isna(right):
right = SF.lit(None)
else:
right = SF.lit(right)
scol = left & right
return F.when(scol.isNull(), False).otherwise(scol)
return column_op(and_func)(left, right)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, IndexOpsMixin) and isinstance(right.dtype, extension_dtypes):
return right.__or__(left)
else:
def or_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column) and pd.isna(right):
return SF.lit(False)
else:
scol = left | SF.lit(right)
return F.when(left.isNull() | scol.isNull(), False).otherwise(scol)
return column_op(or_func)(left, right)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
if isinstance(dtype, extension_dtypes):
scol = F.when(
index_ops.spark.column.isNotNull(),
F.when(index_ops.spark.column, "True").otherwise("False"),
)
nullable = index_ops.spark.nullable
else:
null_str = str(pd.NA) if isinstance(self, BooleanExtensionOps) else str(None)
casted = F.when(index_ops.spark.column, "True").otherwise("False")
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
nullable = False
return index_ops._with_new_scol(
scol,
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=nullable
),
)
else:
return _as_other_type(index_ops, dtype, spark_type)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
return ~operand
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
return operand
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return column_op(Column.__lt__)(left, right)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return column_op(Column.__le__)(left, right)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return column_op(Column.__ge__)(left, right)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return column_op(Column.__gt__)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
return operand._with_new_scol(~operand.spark.column, field=operand._internal.data_fields[0])
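# Behaviour sketched by the arithmetic methods above (illustrative, using the public
# pandas-on-Spark API rather than calling the ops class directly):
#
#   import pyspark.pandas as ps
#   b = ps.Series([True, False, True])
#   b + True   # bool + bool -> elementwise OR:   [True, True, True]
#   b + 1      # bool + int  -> numeric addition: [2, 1, 2]
#   b * False  # bool * bool -> elementwise AND:  [False, False, False]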
class BooleanExtensionOps(BooleanOps):
"""
The class for binary operations of pandas-on-Spark objects with spark type BooleanType,
and dtype BooleanDtype.
"""
@property
def pretty_name(self) -> str:
return "booleans"
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
def and_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if pd.isna(right):
right = SF.lit(None)
else:
right = SF.lit(right)
return left & right
return column_op(and_func)(left, right)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
def or_func(left: Column, right: Any) -> Column:
if not isinstance(right, Column):
if | pd.isna(right) | pandas.isna |
"""Base class for working with order records."""
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.utils.colors import adjust_lightness
from vectorbt.utils.enum import to_value_map
from vectorbt.utils.widgets import FigureWidget
from vectorbt.utils.config import merge_dicts
from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast_to
from vectorbt.records.base import Records
from vectorbt.portfolio.enums import order_dt, OrderSide
class Orders(Records):
"""Extends `Records` for working with order records.
## Example
Get the total number of buy and sell operations:
```python-repl
>>> import vectorbt as vbt
>>> import pandas as pd
>>> price = pd.Series([1., 2., 3., 2., 1.])
>>> size = pd.Series([1., 1., 1., 1., -1.])
>>> orders = vbt.Portfolio.from_orders(price, size).orders
>>> orders.buy.count()
4
>>> orders.sell.count()
1
```
"""
def __init__(self, wrapper, records_arr, close, idx_field='idx', **kwargs):
Records.__init__(
self,
wrapper,
records_arr,
idx_field=idx_field,
close=close,
**kwargs
)
self._close = broadcast_to(close, wrapper.dummy(group_by=False))
if not all(field in records_arr.dtype.names for field in order_dt.names):
raise TypeError("Records array must match order_dt")
def _indexing_func_meta(self, pd_indexing_func):
"""Perform indexing on `Orders` and return metadata."""
new_wrapper, new_records_arr, group_idxs, col_idxs = \
Records._indexing_func_meta(self, pd_indexing_func)
new_close = new_wrapper.wrap(to_2d(self.close, raw=True)[:, col_idxs], group_by=False)
return self.copy(
wrapper=new_wrapper,
records_arr=new_records_arr,
close=new_close
), group_idxs, col_idxs
def _indexing_func(self, pd_indexing_func):
"""Perform indexing on `Orders`."""
return self._indexing_func_meta(pd_indexing_func)[0]
@property
def close(self):
"""Reference price such as close."""
return self._close
@property # no need for cached
def records_readable(self):
"""Records in readable format."""
records_df = self.records
out = | pd.DataFrame() | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pathlib import Path
from neatrader.preprocess import CsvImporter
from neatrader.model import Security
from datetime import datetime
raw = | pd.read_csv('data/TSLA/chains/200207.csv') | pandas.read_csv |
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import pandas as pd
from datetime import datetime
from dateutil import parser
import time
from scipy.stats import gaussian_kde
from sklearn.gaussian_process import GaussianProcessClassifier
from pandas_datareader import data
import numexpr as ne
seaborn.set()
def numpy_learner_func():
    # counting with numpy
rng = np.random.RandomState(0)
x_data = rng.randint(10, size=(3, 4))
print("rng:{}".format(rng))
print("x_data{}".format(x_data))
    # count the values less than 6: np.count_nonzero, np.sum, np.where
num1 = np.count_nonzero(x_data < 6)
num2 = np.sum(x_data < 6)
num3 = np.any(x_data < 6)
num4 = np.all(x_data < 6)
num5 = np.where(x_data < 6)[0]
print(x_data < 6, num3, num4, num5, num5.shape[0])
print("num1 is {}".format(num1))
print("num2 is {}".format(num2))
print(x_data[x_data < 6])
print(9 and 0)
    # np.newaxis adds a new dimension to an array
x = np.arange(3)
print(x, x.shape)
x1 = x[:, np.newaxis]
print(x1, x1.shape)
x2 = x[:, np.newaxis, np.newaxis]
print(x2, x2.shape)
x3 = np.zeros(10)
np.add.at(x3, [0, 1, 5], 1)
print(x3)
# print("x4 is {}".format(x4))
i = [2, 3, 3, 4, 4, 4]
x3[i] += 1
print(x3)
# np.random.seed(42)
x_np = np.random.randn(100)
bins = np.linspace(-5, 5, 20)
    # zeros_like returns an array of zeros with the same shape as its argument
counts = np.zeros_like(bins)
print("counts is {}".format(counts))
    # np.searchsorted returns the indices at which the values of x_np would be inserted into the sorted bins
j = np.searchsorted(bins, x_np)
print("j is {}".format(j))
# np.searchsorted()
    # numpy sorting: np.sort() returns a new sorted array
srt_array = np.array([2, 1, 4, 3, 5])
print("sorted:{}".format(np.sort(srt_array)))
    # ndarray.sort() sorts the array in place and returns None (so the print below shows None)
print("x.sort() is {}".format(srt_array.sort()))
sorted_arr = np.array([99, 0, 3, 1, 90])
    # np.argsort() returns the indices that would sort the array
print("np.argsort(srt_array) is {}".format(np.argsort(sorted_arr)))
    # np.sort with the axis argument sorts along the specified dimension
axis_arr = np.random.RandomState(42).randint(0, 10, (4, 6))
print("the array is {}".format(axis_arr))
print("sort each column of axis_arr, returns {}".format(np.sort(axis_arr, axis=0)))
print("sort each row of axis_arr, returns {}".format(np.sort(axis_arr, axis=1)))
    # partial sort / partition sort
    np_part = np.array([3, 8, 4, 99, 5, 1, 88])  # np.partition(np_part, 3) puts the three smallest values before index 3
print("np_part partition sorted is {}".format(np.partition(np_part, 3,)))
def K_nearest_neighbors_func():
X = np.random.RandomState(42).rand(10, 2) # 10X2 array
plt.scatter(X[:, 0], X[:, 1], s=100)
x_newaxis = X[:, np.newaxis, :]
print("X[:, np.newaxis, :]:", x_newaxis)
print(x_newaxis.shape)
x_newaxis_1 = X[np.newaxis, :, :]
print("x_newaxis_1:", x_newaxis_1)
print(x_newaxis_1.shape)
diff_newaxis = x_newaxis - x_newaxis_1
print("diff_newaxis:", diff_newaxis, diff_newaxis.shape)
sq_differences = diff_newaxis ** 2
    dist_sq = sq_differences.sum(-1)  # sum over the last axis
print("dist_sq:", dist_sq, sq_differences.shape, dist_sq.shape)
    eye_dist_sq = dist_sq.diagonal()  # the diagonal of the distance matrix
print("eye_dist_sq is {}".format(eye_dist_sq))
    nearest = np.argsort(dist_sq, axis=1)  # argsort each row in ascending order, returning the index order
K = 2
    nearest_partition = np.argpartition(dist_sq, K+1, axis=1)  # partition each row so the K+1 smallest distances come first
# print("nearest_partition.shape is {}".format(nearest_partition.shape))
# #
# # dis_sq = np.sum((X[:, np.newaxis, :] - X[np.newaxis, :, :])**2, axis=-1)
for i in range(X.shape[0]):
for j in nearest_partition[i, :K+1]:
plt.plot(*zip(X[j], X[i]), color='black')
# k_nearest_neighbors_loop_func(X, K)
plt.show()
def k_nearest_neighbors_loop_func(X, K):
all_dist = {}
index_dict = {}
    # compute the distance from each point to every other point and sort them
for i in range(X.shape[0]):
start_point = X[i, :]
start_point_dis = {}
for j in range(X.shape[0]):
if i != j:
dis = np.sqrt((start_point[0] - X[j, 0])**2 + (start_point[1] - X[j, 1])**2)
# start_point_dis.append(dis)
start_point_dis[j] = dis
        # sort the dictionary by value
sorted_start_point_dis = {}
# for item in dict_a.items():
# print(item)
# out.append((item[1], item[0]))
# print(out, sorted(out))
inter_list = sorted(start_point_dis.items(), key = lambda kv:(kv[1], kv[0]))
for each in inter_list:
sorted_start_point_dis[each[0]] = each[1]
all_dist[i] = list(sorted_start_point_dis.keys())[:K]
    # take the indices of the K nearest points
for a in range(X.shape[0]):
for b in all_dist[a]:
print("a, b", a, b)
plt.plot(*zip(X[a, :], X[b, :]), color='blue')
plt.show()
# print(all_dist)
def pandas_learner():
    # a pandas Index is an immutable array / an ordered collection that may contain duplicates
indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([2, 3, 5, 7, 11])
index1 = indA & indB # 交集
index2 = indA | indB # 全集
index3 = indA ^ indB # 差集
print(index1, index2, index3)
data = pd.Series([0.25, 0.5, 0.75, 1.0],
index=['a', 'b', 'c', 'd'])
print(data['b'])
print('a' in data)
print(data.keys())
print(list(data.items()))
data['e'] = 1.25
    print(data['a': 'c'])  # label-based slicing, inclusive of 'c'
print(data[0:2])
print(data[(data > 0.3) & (data < 0.8)])
print(data[['a', 'e']])
    # loc: explicit label-based indexing
print(data[1])
print(data[1:3])
print(data.loc['a'])
    # iloc: implicit integer-position indexing
print(data.iloc[1])
print(data.iloc[1:3])
def pandas_null():
valsl = np.array([1, np.nan, 3, 4])
print(valsl.dtype)
print(1+np.nan)
print(0*np.nan)
    print(np.sum(valsl), np.min(valsl), np.max(valsl))  # any sum/min/max aggregation over data containing NaN yields NaN
    print(np.nansum(valsl), np.nanmin(valsl), np.nanmax(valsl))  # the nan-aware variants ignore NaN when computing sum, min and max
print(np.nan == None)
data = pd.Series([1, np.nan, 'hello', None])
print(data.isnull())
print(data.notnull())
print(data[data.notnull()])
print("dropnan:", data.dropna())
data_df = pd.DataFrame([[1, np.nan, 2], [2, 3, 5], [np.nan, 4, 6]])
print(data_df.dropna())
print(data_df.dropna(axis='columns'))
data_df[3] = np.nan
print(data_df.dropna(axis='columns', how='all'))
print(data_df.dropna(axis='columns', how='any'))
print(data_df.dropna(axis='rows', thresh=3))
def numpy_learner():
df = pd.DataFrame({'key': ['A', 'B', 'C', 'A', 'B', 'C'], 'data': range(6)}, columns=['key', 'data'])
print('df is {} \n'.format(df))
df.groupby('key')
print("df.groupby('key')".format(df))
print('DataFrames is'.format(df.groupby('key').sum()))
print('sum is sum() is {}'.format(df.groupby('key').sum()))
print("planets.groupby('method')".format())
def pandas_aggregation_group():
rng = np.random.RandomState(42)
ser = pd.Series(rng.rand(5))
print(ser.mean())
print(ser.sum())
df = pd.DataFrame({'A': rng.rand(5), 'B': rng.rand(5)})
print(df.mean(axis='columns'))
    df_data = pd.DataFrame({'key': ['A', 'B', 'C', 'D'], 'data': range(4)}, columns=['key', 'data'])
print(df_data)
# print()
def learn_pivot_table():
    # the remote fetch failed, so the dataset was downloaded locally and is referenced from there
titanic = seaborn.load_dataset('titanic', cache=True, data_home="./seaborn-data")
print(titanic.head())
print(titanic.groupby('sex')[['survived']].mean())
print(titanic.groupby(['sex', 'class'])['survived'].aggregate('mean').unstack())
    # pivot table
t_pivot_table = titanic.pivot_table('survived', index='sex', columns='class')
print(t_pivot_table)
# fare = pd.qcut(titanic, 2)
age = pd.cut(titanic['age'], [0, 18, 80])
age_table = titanic.pivot_table('survived', ['sex', age], 'class')
print(age_table)
fare = pd.qcut(titanic['fare'], 2)
fare_table = titanic.pivot_table('survived', ['sex', age], [fare, 'class'])
print(fare_table)
def working_with_strings():
    # pandas can apply vectorized operations to string data
data = ['Peter', 'Paul', None, 'MARY', 'gUIDO']
names = pd.Series(data)
print(names)
print(names.str.capitalize())
monte = | pd.Series(['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']) | pandas.Series |
#------------------------------------------------------------------------------------------------------------#
#Chapter 1 - Clustering for dataset exploration
#------------------------------------------------------------------------------------------------------------#
#Clustering 2D points
# Import KMeans
from sklearn.cluster import KMeans
# Create a KMeans instance with 3 clusters: model
model = KMeans(n_clusters=3)
# Fit model to points
model.fit(points)
# Determine the cluster labels of new_points: labels
labels = model.predict(new_points)
# Print cluster labels of new_points
print(labels)
#------------------------------------------------------------------------------------------------------------#
#Inspect your clustering
# Import pyplot
import matplotlib.pyplot as plt
# Assign the columns of new_points: xs and ys
xs = new_points[:,0]
ys = new_points[:,1]
# Make a scatter plot of xs and ys, using labels to define the colors
plt.scatter(xs,ys,c=labels,alpha=0.5)
# Assign the cluster centers: centroids
centroids = model.cluster_centers_
# Assign the columns of centroids: centroids_x, centroids_y
centroids_x = centroids[:,0]
centroids_y = centroids[:,1]
# Make a scatter plot of centroids_x and centroids_y
plt.scatter(centroids_x,centroids_y,marker='D',s=50)
plt.show()
#------------------------------------------------------------------------------------------------------------#
#How many clusters of grain?
ks = range(1, 6)
inertias = []
for k in ks:
# Create a KMeans instance with k clusters: model
model = KMeans(n_clusters=k)
# Fit model to samples
model.fit(samples)
# Append the inertia to the list of inertias
inertias.append(model.inertia_)
# Plot ks vs inertias
plt.plot(ks, inertias, '-o')
plt.xlabel('number of clusters, k')
plt.ylabel('inertia')
plt.xticks(ks)
plt.show()
#------------------------------------------------------------------------------------------------------------#
#Evaluating the grain clustering
# Create a KMeans model with 3 clusters: model
model = KMeans(n_clusters=3)
# Use fit_predict to fit model and obtain cluster labels: labels
labels = model.fit_predict(samples)
# Create a DataFrame with labels and varieties as columns: df
df = pd.DataFrame({'labels': labels, 'varieties': varieties})
# Create crosstab: ct
ct = pd.crosstab(df['labels'],df['varieties'])
# Display ct
print(ct)
#------------------------------------------------------------------------------------------------------------#
#Scaling fish data for clustering
# Perform the necessary imports
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
# Create scaler: scaler
scaler = StandardScaler()
# Create KMeans instance: kmeans
kmeans = KMeans(n_clusters=4)
# Create pipeline: pipeline
pipeline = make_pipeline(scaler,kmeans)
#------------------------------------------------------------------------------------------------------------#
#Clustering the fish data
# Import pandas
import pandas as pd
# Fit the pipeline to samples
pipeline.fit(samples)
# Calculate the cluster labels: labels
labels = pipeline.predict(samples)
# Create a DataFrame with labels and species as columns: df
df = pd.DataFrame({'labels':labels,'species':species})
# Create crosstab: ct
ct = | pd.crosstab(df['labels'],df['species']) | pandas.crosstab |
import operator
from operator import methodcaller
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
pytestmark = pytest.mark.pandas
def test_table_column(t, df):
expr = t.plain_int64
result = expr.execute()
expected = df.plain_int64
tm.assert_series_equal(result, expected)
def test_literal(client):
assert client.execute(ibis.literal(1)) == 1
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df')
def test_selection(t, df):
expr = t[
((t.plain_strings == 'a') | (t.plain_int64 == 3))
& (t.dup_strings == 'd')
]
result = expr.execute()
expected = df[
((df.plain_strings == 'a') | (df.plain_int64 == 3))
& (df.dup_strings == 'd')
].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_mutate(t, df):
expr = t.mutate(x=t.plain_int64 + 1, y=t.plain_int64 * 2)
result = expr.execute()
expected = df.assign(x=df.plain_int64 + 1, y=df.plain_int64 * 2)
tm.assert_frame_equal(result[expected.columns], expected)
def test_project_scope_does_not_override(t, df):
col = t.plain_int64
expr = t[
[
col.name('new_col'),
col.sum()
.over(ibis.window(group_by='dup_strings'))
.name('grouped'),
]
]
result = expr.execute()
expected = pd.concat(
[
df[['plain_int64', 'dup_strings']].rename(
columns={'plain_int64': 'new_col'}
),
df.groupby('dup_strings')
.plain_int64.transform('sum')
.reset_index(drop=True)
.rename('grouped'),
],
axis=1,
)[['new_col', 'grouped']]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'where',
[
lambda t: None,
lambda t: t.dup_strings == 'd',
lambda t: (t.dup_strings == 'd') | (t.plain_int64 < 100),
],
)
@pytest.mark.parametrize(
('ibis_func', 'pandas_func'),
[
(methodcaller('abs'), np.abs),
(methodcaller('ceil'), np.ceil),
(methodcaller('exp'), np.exp),
(methodcaller('floor'), np.floor),
(methodcaller('ln'), np.log),
(methodcaller('log10'), np.log10),
(methodcaller('log', 2), lambda x: np.log(x) / np.log(2)),
(methodcaller('log2'), np.log2),
(methodcaller('round', 0), lambda x: x.round(0).astype('int64')),
(methodcaller('round', -2), methodcaller('round', -2)),
(methodcaller('round', 2), methodcaller('round', 2)),
(methodcaller('round'), lambda x: x.round().astype('int64')),
(methodcaller('sign'), np.sign),
(methodcaller('sqrt'), np.sqrt),
],
)
def test_aggregation_group_by(t, df, where, ibis_func, pandas_func):
ibis_where = where(t)
expr = t.group_by(t.dup_strings).aggregate(
avg_plain_int64=t.plain_int64.mean(where=ibis_where),
sum_plain_float64=t.plain_float64.sum(where=ibis_where),
mean_float64_positive=ibis_func(t.float64_positive).mean(
where=ibis_where
),
neg_mean_int64_with_zeros=(-t.int64_with_zeros).mean(where=ibis_where),
nunique_dup_ints=t.dup_ints.nunique(),
)
result = expr.execute()
pandas_where = where(df)
mask = slice(None) if pandas_where is None else pandas_where
expected = (
df.groupby('dup_strings')
.agg(
{
'plain_int64': lambda x, mask=mask: x[mask].mean(),
'plain_float64': lambda x, mask=mask: x[mask].sum(),
'dup_ints': 'nunique',
'float64_positive': (
lambda x, mask=mask, func=pandas_func: func(x[mask]).mean()
),
'int64_with_zeros': lambda x, mask=mask: (-x[mask]).mean(),
}
)
.reset_index()
.rename(
columns={
'plain_int64': 'avg_plain_int64',
'plain_float64': 'sum_plain_float64',
'dup_ints': 'nunique_dup_ints',
'float64_positive': 'mean_float64_positive',
'int64_with_zeros': 'neg_mean_int64_with_zeros',
}
)
)
# TODO(phillipc): Why does pandas not return floating point values here?
expected['avg_plain_int64'] = expected.avg_plain_int64.astype('float64')
result['avg_plain_int64'] = result.avg_plain_int64.astype('float64')
expected[
'neg_mean_int64_with_zeros'
] = expected.neg_mean_int64_with_zeros.astype('float64')
result[
'neg_mean_int64_with_zeros'
] = result.neg_mean_int64_with_zeros.astype('float64')
expected['mean_float64_positive'] = expected.mean_float64_positive.astype(
'float64'
)
result['mean_float64_positive'] = result.mean_float64_positive.astype(
'float64'
)
lhs = result[expected.columns]
rhs = expected
tm.assert_frame_equal(lhs, rhs)
def test_aggregation_without_group_by(t, df):
expr = t.aggregate(
avg_plain_int64=t.plain_int64.mean(),
sum_plain_float64=t.plain_float64.sum(),
)
result = expr.execute()[['avg_plain_int64', 'sum_plain_float64']]
new_names = {
'plain_float64': 'sum_plain_float64',
'plain_int64': 'avg_plain_int64',
}
expected = (
pd.Series(
[df['plain_int64'].mean(), df['plain_float64'].sum()],
index=['plain_int64', 'plain_float64'],
)
.to_frame()
.T.rename(columns=new_names)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_with_having(t, df):
expr = (
t.group_by(t.dup_strings)
.having(t.plain_float64.sum() == 5)
.aggregate(avg_a=t.plain_int64.mean(), sum_c=t.plain_float64.sum())
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.agg({'plain_int64': 'mean', 'plain_float64': 'sum'})
.reset_index()
.rename(columns={'plain_int64': 'avg_a', 'plain_float64': 'sum_c'})
)
expected = expected.loc[expected.sum_c == 5, ['avg_a', 'sum_c']]
tm.assert_frame_equal(result[expected.columns], expected)
def test_group_by_rename_key(t, df):
expr = t.groupby(t.dup_strings.name('foo')).aggregate(
dup_string_count=t.dup_strings.count()
)
assert 'foo' in expr.schema()
result = expr.execute()
assert 'foo' in result.columns
expected = (
df.groupby('dup_strings')
.dup_strings.count()
.rename('dup_string_count')
.reset_index()
.rename(columns={'dup_strings': 'foo'})
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('reduction', ['mean', 'sum', 'count', 'std', 'var'])
@pytest.mark.parametrize(
'where',
[
lambda t: (t.plain_strings == 'a') | (t.plain_strings == 'c'),
lambda t: (t.dup_strings == 'd')
& ((t.plain_int64 == 1) | (t.plain_int64 == 3)),
lambda t: None,
],
)
def test_reduction(t, df, reduction, where):
func = getattr(t.plain_int64, reduction)
mask = where(t)
expr = func(where=mask)
result = expr.execute()
df_mask = where(df)
expected_func = getattr(
df.loc[df_mask if df_mask is not None else slice(None), 'plain_int64'],
reduction,
)
expected = expected_func()
assert result == expected
@pytest.mark.parametrize(
'reduction',
[
lambda x: x.any(),
lambda x: x.all(),
lambda x: ~(x.any()),
lambda x: ~(x.all()),
],
)
def test_boolean_aggregation(t, df, reduction):
expr = reduction(t.plain_int64 == 1)
result = expr.execute()
expected = reduction(df.plain_int64 == 1)
assert result == expected
@pytest.mark.parametrize('column', ['float64_with_zeros', 'int64_with_zeros'])
def test_null_if_zero(t, df, column):
expr = t[column].nullifzero()
result = expr.execute()
expected = df[column].replace(0, np.nan)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('left', 'right', 'expected', 'compare'),
[
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(1),
lambda df: np.nan,
np.testing.assert_array_equal, # treats NaNs as equal
id='literal_literal_equal',
),
pytest.param(
lambda t: ibis.literal(1),
lambda t: ibis.literal(2),
lambda df: 1,
np.testing.assert_equal,
id='literal_literal_not_equal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: ibis.literal('a'),
lambda df: df.dup_strings.where(df.dup_strings != 'a'),
tm.assert_series_equal,
id='series_literal',
),
pytest.param(
lambda t: t.dup_strings,
lambda t: t.dup_strings,
lambda df: df.dup_strings.where(df.dup_strings != df.dup_strings),
tm.assert_series_equal,
id='series_series',
),
pytest.param(
lambda t: ibis.literal('a'),
lambda t: t.dup_strings,
lambda df: pd.Series(
np.where(df.dup_strings == 'a', np.nan, 'a'), index=df.index
),
tm.assert_series_equal,
id='literal_series',
),
],
)
def test_nullif(t, df, left, right, expected, compare):
expr = left(t).nullif(right(t))
result = execute(expr)
compare(result, expected(df))
def test_nullif_inf():
df = pd.DataFrame({'a': [np.inf, 3.14, -np.inf, 42.0]})
con = connect({'t': df})
t = con.table('t')
expr = t.a.nullif(np.inf).nullif(-np.inf)
result = expr.execute()
expected = pd.Series([np.nan, 3.14, np.nan, 42.0], name='a')
tm.assert_series_equal(result, expected)
def test_group_concat(t, df):
expr = t.groupby(t.dup_strings).aggregate(
foo=t.plain_int64.group_concat(',')
)
result = expr.execute()
expected = (
df.groupby('dup_strings')
.apply(lambda df: ','.join(df.plain_int64.astype(str)))
.reset_index()
.rename(columns={0: 'foo'})
)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.parametrize('offset', [0, 2])
def test_frame_limit(t, df, offset):
n = 5
df_expr = t.limit(n, offset=offset)
result = df_expr.execute()
expected = df.iloc[offset : offset + n].reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.xfail(
raises=AttributeError, reason='TableColumn does not implement limit'
)
@pytest.mark.parametrize('offset', [0, 2])
def test_series_limit(t, df, offset):
n = 5
s_expr = t.plain_int64.limit(n, offset=offset)
result = s_expr.execute()
tm.assert_series_equal(result, df.plain_int64.iloc[offset : offset + n])
@pytest.mark.parametrize(
('key', 'pandas_by', 'pandas_ascending'),
[
(lambda t, col: [ibis.desc(t[col])], lambda col: [col], False),
(
lambda t, col: [t[col], ibis.desc(t.plain_int64)],
lambda col: [col, 'plain_int64'],
[True, False],
),
(
lambda t, col: [ibis.desc(t.plain_int64 * 2)],
lambda col: ['plain_int64'],
False,
),
],
)
@pytest.mark.parametrize(
'column',
['plain_datetimes_naive', 'plain_datetimes_ny', 'plain_datetimes_utc'],
)
def test_sort_by(t, df, column, key, pandas_by, pandas_ascending):
expr = t.sort_by(key(t, column))
result = expr.execute()
expected = df.sort_values(
pandas_by(column), ascending=pandas_ascending
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
def test_complex_sort_by(t, df):
expr = t.sort_by(
[ibis.desc(t.plain_int64 * t.plain_float64), t.plain_float64]
)
result = expr.execute()
expected = (
df.assign(foo=df.plain_int64 * df.plain_float64)
.sort_values(['foo', 'plain_float64'], ascending=[False, True])
.drop(['foo'], axis=1)
.reset_index(drop=True)
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_distinct(t, df):
expr = t.dup_strings.distinct()
result = expr.execute()
expected = pd.Series(df.dup_strings.unique(), name='dup_strings')
| tm.assert_series_equal(result, expected) | pandas.testing.assert_series_equal |
import pandas
import simtk.openmm
import os
import shutil
# imports for accessibility outside
import functionTerms
functionTerms=functionTerms
# Reads pdb file to a table
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
_AWSEMresidues = ['IPR', 'IGL', 'NGP']
xml = f'{__location__}/awsem.xml'
def parsePDB(pdb_file):
def pdb_line(line):
return dict(recname=str(line[0:6]).strip(),
serial=int(line[6:11]),
name=str(line[12:16]).strip(),
altLoc=str(line[16:17]),
resname=str(line[17:20]).strip(),
chainID=str(line[21:22]),
resSeq=int(line[22:26]),
iCode=str(line[26:27]),
x=float(line[30:38]),
y=float(line[38:46]),
z=float(line[46:54]),
occupancy=0.0 if line[54:60].strip() == '' else float(line[54:60]),
tempFactor=0.0 if line[60:66].strip() == '' else float(line[60:66]),
element=str(line[76:78]),
charge=str(line[78:80]))
with open(pdb_file, 'r') as pdb:
lines = []
for line in pdb:
            if len(line) > 6 and line[:6].strip() in ['ATOM', 'HETATM']:
lines += [pdb_line(line)]
pdb_atoms = pandas.DataFrame(lines)
pdb_atoms = pdb_atoms[['recname', 'serial', 'name', 'altLoc',
'resname', 'chainID', 'resSeq', 'iCode',
'x', 'y', 'z', 'occupancy', 'tempFactor',
'element', 'charge']]
return pdb_atoms
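# Illustrative usage sketch (the file name below is a hypothetical example): parsePDB returns
# one pandas row per ATOM/HETATM record, so standard DataFrame operations apply directly.
def _example_parse_pdb(pdb_file='crystal_structure.pdb'):
    atoms = parsePDB(pdb_file)
    # atoms per chain, as a quick sanity check of the parsed table
    return atoms.groupby('chainID')['serial'].count()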
def parseConfigTable(config_section):
"""Parses a section of the configuration file as a table"""
def readData(config_section, a):
"""Filters comments and returns values as a list"""
temp = config_section.get(a).split('#')[0].split()
l = []
for val in temp:
val = val.strip()
try:
x = int(val)
l += [x]
except ValueError:
try:
y = float(val)
l += [y]
except ValueError:
l += [val]
return l
data = []
for a in config_section:
if a == 'name':
columns = readData(config_section, a)
elif len(a) > 3 and a[:3] == 'row':
data += [readData(config_section, a)]
else:
print(f'Unexpected row {readData(config_section, a)}')
return pandas.DataFrame(data, columns=columns)
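# Illustrative usage sketch (section and column names below are made up): parseConfigTable
# expects a configparser section with a 'name' key holding the column headers and
# 'row1', 'row2', ... keys holding the data; anything after '#' is stripped as a comment.
def _example_parse_config_table():
    import configparser
    config = configparser.ConfigParser()
    config.read_string(
        "[Bonds]\n"
        "name = i j k r0  # column headers\n"
        "row1 = 0 1 10.0 0.38\n"
        "row2 = 1 2 10.0 0.38\n"
    )
    return parseConfigTable(config['Bonds'])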
def copy_parameter_files():
src = f"{__location__}/parameters"
dest = '.'
src_files = os.listdir(src)
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest)
class BaseError(Exception):
pass
class Protein(object):
def __init__(self, atoms, sequence, k_awsem=1):
self.atoms = atoms
#Include real residue name in atoms
atoms = self.atoms.copy()
atoms['chain_res'] = atoms['chainID'].astype(str) + '_' + atoms['resSeq'].astype(str)
sel = atoms[atoms['resname'].isin(_AWSEMresidues)]
resix = sel['chain_res'].unique()
assert len(resix) == len(sequence), \
f'The number of residues {len(resix)} does not agree with the length of the sequence {len(sequence)}'
atoms.index = atoms['chain_res']
for r, s in zip(resix, sequence):
atoms.loc[r, 'real_resname'] = s
atoms.index = range(len(atoms))
self.atoms = atoms
protein_data = atoms[atoms.resname.isin(_AWSEMresidues)].copy()
# renumber residues
resix = (protein_data.chainID + '_' + protein_data.resSeq.astype(str))
res_unique = resix.unique()
protein_data['resID'] = resix.replace(dict(zip(res_unique, range(len(res_unique)))))
# renumber atom types
atom_types_table = {'N': 'n', 'H': 'h', 'CA': 'ca', 'C': 'c', 'O': 'o', 'CB': 'cb'}
protein_data['atom_list'] = protein_data['name'].replace(atom_types_table)
protein_data['idx'] = protein_data.index.astype(int)
self.protein_data = protein_data
self.atom_lists = protein_data.pivot(index='resID', columns='atom_list', values='idx').fillna(-1).astype(int)
self.n = self.atom_lists['n'].tolist()
self.h = self.atom_lists['h'].tolist()
self.ca = self.atom_lists['ca'].tolist()
self.c = self.atom_lists['c'].tolist()
self.o = self.atom_lists['o'].tolist()
self.cb = self.atom_lists['cb'].tolist()
self.nres = len(self.atom_lists)
self.k_awsem = k_awsem
self.res_type = [r.iloc[0]['resname'] for i, r in protein_data.groupby('resID')]
self.chain_starts = [c.iloc[0].resID for i, c in protein_data.groupby('chainID')]
self.chain_ends = [c.iloc[-1].resID for i, c in protein_data.groupby('chainID')]
self.natoms = len(atoms)
self.bonds = self._setup_bonds()
self.seq = sequence
self.resi = pandas.merge(self.atoms, self.protein_data, how='left').resID.fillna(-1).astype(int).tolist()
pass
def _setup_bonds(self):
bonds = []
for i in range(self.nres):
bonds.append((self.ca[i], self.o[i]))
if not self.res_type[i] == "IGL":
bonds.append((self.ca[i], self.cb[i]))
if i not in self.chain_ends:
bonds.append((self.ca[i], self.ca[i + 1]))
bonds.append((self.o[i], self.ca[i + 1]))
for i in range(self.nres):
if i not in self.chain_starts and not self.res_type[i] == "IGL":
bonds.append((self.n[i], self.cb[i]))
if i not in self.chain_ends and not self.res_type[i] == "IGL":
bonds.append((self.c[i], self.cb[i]))
if i not in self.chain_starts and i not in self.chain_ends:
bonds.append((self.n[i], self.c[i]))
return bonds
    def setup_virtual_sites(self, system):
# set virtual sites
for i in range(self.nres):
if i not in self.chain_starts:
n_virtual_site = simtk.openmm.ThreeParticleAverageSite(self.ca[i - 1], self.ca[i], self.o[i - 1],
0.48318, 0.70328, -0.18643)
system.setVirtualSite(self.n[i], n_virtual_site)
if not self.res_type[i] == "IPR":
h_virtual_site = simtk.openmm.ThreeParticleAverageSite(self.ca[i - 1], self.ca[i], self.o[i - 1],
0.84100, 0.89296, -0.73389)
system.setVirtualSite(self.h[i], h_virtual_site)
if i not in self.chain_ends:
c_virtual_site = simtk.openmm.ThreeParticleAverageSite(self.ca[i], self.ca[i + 1], self.o[i],
0.44365, 0.23520, 0.32115)
# print("Virtual", c[i])
system.setVirtualSite(self.c[i], c_virtual_site)
@classmethod
def fromPDB(cls, pdb, pdbout='CoarseProtein.pdb'):
""" Initializes a protein form a pdb, making all the atoms coarse-grained"""
pass
@classmethod
def fromCoarsePDB(cls, pdb_file, sequence):
""" Initializes the protein from an already coarse grained pdb"""
atoms = parsePDB(pdb_file)
return cls(atoms, sequence)
def parseConfigurationFile(self):
""" Parses the AWSEM configuration file to use for the topology and to set the forces"""
pass
def computeTopology(self):
""" Compute the bonds and angles from the pdb"""
pass
@staticmethod
def CoarseGrain(pdb_table):
""" Selects AWSEM atoms from a pdb table and returns a table containing only the coarse-grained atoms for AWSEM """
protein_residues = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS',
'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
'LEU', 'LYS', 'MET', 'PHE', 'PRO',
'SER', 'THR', 'TRP', 'TYR', 'VAL']
awsem_atoms = ["N", "H", "CA", "C", "O", "CB"]
# Select coarse grained atoms
selection = pdb_table[pdb_table.resname.isin(protein_residues) & pdb_table.name.isin(awsem_atoms)].copy()
        # Remove virtual atoms at the end or beginning of the chain
drop_list = []
for chain in selection.chainID.unique():
sel = selection[selection.chainID == chain]
drop_list += list(sel[(sel.resSeq == sel.resSeq.min()) & sel['name'].isin(['N', 'H'])].index)
drop_list += list(sel[(sel.resSeq == sel.resSeq.max()) & sel['name'].isin(['C'])].index)
selection = selection.drop(drop_list)
# Replace resnames
selection['real_resname'] = selection.resname.copy()
resname = selection.resname.copy()
resname[:] = 'NGP'
resname[selection.resname == 'PRO'] = 'IPR'
resname[selection.resname == 'GLY'] = 'IGL'
selection.resname = resname
# CB element is B
selection.loc[selection['name'] == 'CB', 'element'] = 'B'
# Reorder atoms
selection.name = | pandas.Categorical(selection.name, awsem_atoms) | pandas.Categorical |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_projection(mssql_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_spja(mssql_url: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table AS a, test_str AS b
WHERE a.test_int = b.id AND test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(mssql_url, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64"),
},
)
df = df.sort_values("sum").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": | pd.Series([], dtype="float64") | pandas.Series |
import pandas
from decibel.evaluator import evaluator
from decibel.import_export import filehandler
def compare_chord_labels(chord_label_1_path: str, chord_label_2_path: str):
"""
Compare two chord label sequences
:param chord_label_1_path: Path to .lab file of one chord label sequence
:param chord_label_2_path: Path to .lab file of other chord label sequence
:return: CSR (overlap percentage between the two chord label sequences)
"""
return evaluator.evaluate(chord_label_1_path, chord_label_2_path)[0]
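# Illustrative usage sketch (the .lab paths below are hypothetical): the returned CSR is the
# fraction of the song duration on which the two annotations agree.
def _example_compare_chord_labels():
    csr = compare_chord_labels('song_audio_method.lab', 'song_tab_method.lab')
    print('Chord symbol recall (overlap): {:.3f}'.format(csr))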
df_combination_and_selection_types = [('rnd', 'all'), ('mv', 'all'), ('df', 'all'),
('rnd', 'best'), ('mv', 'best'), ('df', 'best'),
('df', 'actual-best'),
('rnd', 'alltab'), ('rnd', 'besttab'),
('rnd', 'allmidi'), ('rnd', 'bestmidi'),
('mv', 'alltab'), ('mv', 'besttab'),
('mv', 'allmidi'), ('mv', 'bestmidi'),
('df', 'alltab'), ('df', 'besttab'),
('df', 'allmidi'), ('df', 'bestmidi')]
def print_overlap_audio_df_best(all_songs):
print('Overlap audio and df-best on this audio')
audio_types = ['CHF_2017'] + filehandler.MIREX_SUBMISSION_NAMES
result = dict()
for audio_type in audio_types:
wcsr_numerator = 0
wcsr_denominator = 0
for song_key in all_songs:
song = all_songs[song_key]
if audio_type == 'CHF_2017':
audio_lab_str = song.full_chordify_chord_labs_path
else:
audio_lab_str = filehandler.get_full_mirex_chord_labs_path(song, audio_type)
df_lab_str = filehandler.get_data_fusion_path(song_key, 'DF', 'BEST', audio_type)
if filehandler.file_exists(audio_lab_str) and filehandler.file_exists(df_lab_str):
wcsr_numerator += compare_chord_labels(audio_lab_str, df_lab_str) * song.duration
wcsr_denominator += song.duration
print('Overlap between ' + audio_type + ' and ' + audio_type + '-DF-BEST (WCSR):' +
str(wcsr_numerator / wcsr_denominator))
result[audio_type] = wcsr_numerator / wcsr_denominator
result_series = pandas.Series(result)
return result_series
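# The loop above computes a weighted chord symbol recall (WCSR): per-song CSR values weighted
# by song duration. A minimal sketch of that aggregation, factored out for clarity:
def _wcsr(csr_values, durations):
    """Weighted CSR = sum(csr_i * duration_i) / sum(duration_i)."""
    return sum(c * d for c, d in zip(csr_values, durations)) / sum(durations)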
def print_overlap_audio_methods(all_songs):
print('Overlap audio types (audio only)')
result = dict()
audio_types = ['CHF_2017'] + filehandler.MIREX_SUBMISSION_NAMES
for audio_1 in audio_types:
result[audio_1] = dict()
for audio_2 in audio_types:
wcsr_numerator = 0
wcsr_denominator = 0
for song_key, song in all_songs.items():
if audio_1 == 'CHF_2017':
audio_1_lab = song.full_chordify_chord_labs_path
else:
audio_1_lab = filehandler.get_full_mirex_chord_labs_path(song, audio_1)
if audio_2 == 'CHF_2017':
audio_2_lab = song.full_chordify_chord_labs_path
else:
audio_2_lab = filehandler.get_full_mirex_chord_labs_path(song, audio_2)
if filehandler.file_exists(audio_1_lab) and filehandler.file_exists(audio_2_lab):
wcsr_numerator += compare_chord_labels(audio_1_lab, audio_2_lab) * song.duration
wcsr_denominator += song.duration
result[audio_1][audio_2] = wcsr_numerator / wcsr_denominator
print('Overlap between ' + audio_1 + ' and ' + audio_2 + ':' + str(wcsr_numerator / wcsr_denominator))
result_df = pandas.DataFrame(result)
return result_df
def print_overlap_df_best_methods(all_songs):
print('Overlap audio types (df best)')
result = dict()
audio_types = ['CHF_2017'] + filehandler.MIREX_SUBMISSION_NAMES
for audio_1 in audio_types:
result[audio_1 + '-DF-BEST'] = dict()
for audio_2 in audio_types:
wcsr_numerator = 0
wcsr_denominator = 0
for song_key, song in all_songs.items():
audio_1_df_lab = filehandler.get_data_fusion_path(song_key, 'DF', 'BEST', audio_1)
audio_2_df_lab = filehandler.get_data_fusion_path(song_key, 'DF', 'BEST', audio_2)
if filehandler.file_exists(audio_1_df_lab) and filehandler.file_exists(audio_2_df_lab):
wcsr_numerator += compare_chord_labels(audio_1_df_lab, audio_2_df_lab) * song.duration
wcsr_denominator += song.duration
result[audio_1 + '-DF-BEST'][audio_2 + '-DF-BEST'] = wcsr_numerator / wcsr_denominator
print('Overlap between ' + audio_1 + '-DF-BEST and ' + audio_2 + '-DF-BEST:' +
str(wcsr_numerator / wcsr_denominator))
result_df = | pandas.DataFrame(result) | pandas.DataFrame |
# Copyright (C) 2018 GuQiangJs.
# Licensed under Apache License 2.0 <see LICENSE file>
import datetime
import json
import pandas as pd
from pandas.io.json import json_normalize
from finance_datareader_py import _AbsDailyReader
from finance_datareader_py import sohu
__all__ = ['SohuDailyReader']
class SohuDailyReader(_AbsDailyReader):
"""从sohu读取每日成交汇总数据
Args:
symbols: 股票代码。**此参数只接收单一股票代码**。For example:600001,000002,300002
prefix: 读取股票数据时需要拼接的前缀。默认为 ``cn_``。如果是获取指数时需要使用 ``zs_``。
suffix: 股票代码后缀。默认为空。
* 为空表示会自动根据股票代码判断。
* 对于某些特定指数请自行填写。
prefix: 读取股票数据时需要拼接的前缀。默认为 ``cn_``。如果是获取指数时需要使用 ``zs_``。
start: 开始日期。默认值:2004-10-08
end: 结束日期。默认值:当前日期的 **前一天** 。
retry_count: 重试次数
pause: 重试间隔时间
session:
chunksize:
"""
def __init__(self, symbols=None, prefix='cn_', suffix='',
start=datetime.date(2004, 10, 8),
end=datetime.date.today() + datetime.timedelta(days=-1),
retry_count=3, pause=1, session=None,
chunksize=25):
"""
Args:
symbols: 股票代码。**此参数只接收单一股票代码**。For example:600001,000002,300002
prefix: 读取股票数据时需要拼接的前缀。默认为 ``cn_``。如果是获取指数时需要使用 ``zs_``。
suffix: 股票代码后缀。默认为空。
* 为空表示会自动根据股票代码判断。
* 对于某些特定指数请自行填写。
prefix: 读取股票数据时需要拼接的前缀。默认为 ``cn_``。如果是获取指数时需要使用 ``zs_``。
start: 开始日期。默认值:2004-10-08
end: 结束日期。默认值:当前日期的 **前一天** 。
retry_count: 重试次数
pause: 重试间隔时间
session:
chunksize:
"""
super(SohuDailyReader, self).__init__(symbols, start, end, retry_count,
pause, session, chunksize)
self._prefix = prefix
self._suffix = suffix
@property
def url(self):
# http://q.stock.sohu.com/hisHq?code=cn_600569&start=20041008&end=20180608&stat=1&order=D&period=d&rt=jsonp
return 'http://q.stock.sohu.com/hisHq'
def _get_params(self, *args, **kwargs):
return {'code': sohu._parse_symbol(self.symbols, self._prefix,
self._suffix),
'start': self.start.strftime('%Y%m%d'),
'end': self.end.strftime('%Y%m%d'),
'stat': '1',
'order': 'D',
'period': 'd',
'rt': 'jsonp'}
def read(self):
"""读取数据
Returns:
``pandas.DataFrame`` 实例。
成交量的单位为 *手*,成交金额的单位为 *万元*。
无数据时返回空白的 ``pandas.DataFrame`` 。参见 ``pandas.DataFrame.empty``。
部分返回列名说明:
* Open:开盘价
* Close: 收盘价
* High: 最高价
* Low: 最低价
* Volume: 交易量(手)
* Turnover: 成交金额
* Rate: 换手率
Examples:
.. code-block:: python
>>> from finance_datareader_py.sohu.daily import SohuDailyReader
>>> df = SohuDailyReader(symbols='000002').read()
>>> print(df.tail())
Open Close Change Quote Low High Volume Turnover Rate
Date
2004-10-14 5.80 5.67 -0.12 -2.07 5.56 5.80 265167.0 15041.02 1.68
2004-10-13 5.81 5.79 0.00 0.00 5.69 5.85 252039.0 14604.28 1.60
2004-10-12 5.53 5.79 0.25 4.51 5.50 5.87 600869.0 34637.16 3.82
2004-10-11 5.56 5.54 -0.02 -0.36 5.51 5.65 264020.0 14775.34 1.68
2004-10-08 5.42 5.56 0.14 2.58 5.28 5.60 117074.0 6368.60 0.74
"""
try:
return super(SohuDailyReader, self).read()
finally:
self.close()
def _read_url_as_StringIO(self, url, params=None):
"""
        Read the raw data from Sohu.
:param url:
:param params:
:return:
"""
response = self._get_response(url, params=params)
txt = str(self._sanitize_response(response), encoding='ISO-8859-9')
data_json = json.loads(txt[9:-2])
pd_data = | json_normalize(data_json[0], record_path='hq') | pandas.io.json.json_normalize |
import sys
import argparse
from itertools import zip_longest
from typing import Generator
import pandas as pd
from scipy.stats import fisher_exact
from statsmodels.stats.multitest import fdrcorrection
def promoter_size(s):
value = int(s)
if value > 2000:
raise argparse.ArgumentTypeError("Promoter size has to be less than or equal to 2000.")
return value
def probability(s):
value = float(s)
if value < 0 or value > 1:
raise argparse.ArgumentTypeError("Probability cannot be less than 0 or greater than 1.")
return value
def get_lists(path: str) -> Generator[pd.Series, None, None]:
with open(path) as f:
data = pd.Series(f.readlines())
data = data.str.strip()
names = data.index[data.str.startswith(">")].tolist()
if not names:
names = [-1]
elif names[0] > 0:
names = [-1, *names]
yield from (data[start + 1:end].rename(data.get(start, '').lstrip(">")) for start, end in
zip_longest(names, names[1:]))
def get_background(path: str) -> pd.Series:
with open(path) as f:
data = pd.Series(f.readlines())
data = data.str.strip()
data = data[~data.str.startswith('>')]
return data
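# Illustrative sketch of the expected input format (gene identifiers below are hypothetical):
# optional ">name" header lines separate gene lists, while get_background() simply drops the
# headers and keeps every gene id.
def _example_read_lists():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as handle:
        handle.write(">list1\nGENE0001\nGENE0002\n>list2\nGENE0003\n")
        path = handle.name
    for gene_list in get_lists(path):
        print(gene_list.name, gene_list.tolist())
    print(get_background(path).tolist())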
if __name__ == "__main__":
import argparse
import shutil
import os
width, height = shutil.get_terminal_size()
parser = argparse.ArgumentParser()
parser.add_argument("genelist", type=str, help="gene list in FASTA format")
parser.add_argument("-A", "--annotated", type=str, help="annotated matches", default="annotated.pickle.gz")
parser.add_argument("-b", "--background", type=str, help="gene list to use as the background")
parser.add_argument("-P", "--promoter", type=promoter_size, help="limit promoter size")
parser.add_argument("-p", "--p-value", type=probability, help="p-value cutoff of match used")
parser.add_argument("-a", "--alpha", type=probability, help="alpha for enrichment", default=0.05)
parser.add_argument("--hide-rejected", action="store_true", help="only display significant results")
parser.add_argument("-o", "--output", type=str, help="output folder name")
group = parser.add_argument_group(title="limit search")
mutex = group.add_mutually_exclusive_group()
mutex.add_argument("--promoter-only", action="store_true", help="limit search to promoter")
mutex.add_argument("--promoter-overlap", action="store_true", help="search matches that overlap promoter")
mutex.add_argument("--gene-only", action="store_true", help="limit search to gene body")
mutex.add_argument("--gene-overlap", action="store_true", help="search matches that overlap gene body")
args = parser.parse_args()
annotated = pd.read_pickle(args.annotated)
if args.p_value is not None:
annotated = annotated[annotated['p-value'] < args.p_value]
if args.promoter is not None:
annotated = annotated[annotated['dist'] >= -args.promoter]
if args.background:
bg = get_background(args.background)
annotated = annotated.loc[annotated.index.isin(bg), :]
if args.promoter_only:
annotated = annotated[(annotated['stop'] - annotated['start'] + annotated['dist']) < 0]
elif args.promoter_overlap:
annotated = annotated[annotated['dist'] < 0]
elif args.gene_only:
annotated = annotated[annotated['dist'] > 0]
elif args.gene_overlap:
annotated = annotated[(annotated['stop'] - annotated['start'] + annotated['dist']) > 0]
ann_dedup = annotated.drop_duplicates('match_id')
cluster_size = ann_dedup.groupby('#pattern name').size()
print("total matches: {0[0]:,d}\n".format(ann_dedup.shape), file=sys.stderr)
def get_list_enrichment(gene_list: pd.Series, alpha: float = 0.05, hide_rejected: bool = False) -> pd.DataFrame:
print("{} genes in gene list {} are not part of the backgroud".format(
gene_list[~gene_list.isin(annotated.index)].shape[0], gene_list.name),
file=sys.stderr)
list_cluster_dedup = annotated[annotated.index.isin(gene_list)].drop_duplicates('match_id')
list_cluster_size = list_cluster_dedup.groupby('#pattern name').size()
def cluster_fisher(row):
return fisher_exact(
[[row[0], row[1] - row[0]],
[list_cluster_dedup.shape[0] - row[0],
ann_dedup.shape[0] - list_cluster_dedup.shape[0] - row[1] + row[0]]],
alternative='greater')[1]
p_values = pd.concat([list_cluster_size, cluster_size],
axis=1).fillna(0).apply(cluster_fisher, axis=1).sort_values()
reject, adj_p = fdrcorrection(p_values, alpha=alpha, is_sorted=True)
if hide_rejected:
p_values = p_values[reject]
adj_p = adj_p[reject]
adj_p = pd.Series(adj_p, index=p_values.index)
return | pd.concat([p_values, adj_p], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import plotly.express as px
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score
from sklearn.decomposition import PCA
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def data(file_name, column, bool):
with open(file_name, "r") as filestream:
line = filestream.read().split()
for i in range(0, len(line)):
line[i] = line[i].split(",")
matrix = np.array(line, dtype=str)
colors = matrix[0:, 5]
if bool != True:
matrix = np.delete(matrix, column, 1)
return matrix, colors
return matrix
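# Illustrative sketch (hypothetical sample file): each whitespace-separated token is one
# comma-separated record, and column 5 carries the colour label returned alongside the matrix.
def _example_load(file_name='mushrooms_sample.txt'):
    with open(file_name, 'w') as handle:
        handle.write("e,x,s,n,t,p,f,c,n,k p,x,s,y,t,a,f,c,b,k")
    matrix, colors = data(file_name, column=0, bool=False)
    print(matrix.shape, colors)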
def hot_vector(mushrooms):
matrix = | pd.DataFrame() | pandas.DataFrame |
"""Module for querying and parsing SPARQL through GridAPPS-D"""
import logging
import pandas as pd
import numpy as np
import re
from gridappsd import GridAPPSD, topics, utils
class SPARQLManager:
"""Class for querying and parsing SPARQL in GridAPPS-D.
"""
def __init__(self, gapps, feeder_mrid, model_api_topic, simulation_id=None, timeout=30):
"""Connect to the platform.
:param feeder_mrid: unique identifier for the feeder in
question. Since PyVVO works on a per feeder basis, this is
required, and all queries will be executed for the specified
feeder.
:param gapps: gridappsd_object
:param timeout: timeout for querying the blazegraph database.
"""
# Connect to the platform.
self.gad = gapps
# Assign feeder mrid.
self.feeder_mrid = feeder_mrid
# Timeout for SPARQL queries.
self.timeout = timeout
# Powergridmodel API topic
self.topic = model_api_topic
# Assign simulation id
self.simulation_id = simulation_id
def query_transformers(self):
"""Get information on transformers in the feeder."""
# Perform the query.
PowerTransformerEnd_QUERY = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?pname ?vgrp ?enum ?bus ?ratedS ?ratedU WHERE {
VALUES ?fdrid {"%s"} # 9500 node
?p c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?p r:type c:PowerTransformer.
?p c:IdentifiedObject.name ?pname.
?p c:PowerTransformer.vectorGroup ?vgrp.
?end c:PowerTransformerEnd.PowerTransformer ?p.
?end c:TransformerEnd.endNumber ?enum.
?end c:PowerTransformerEnd.ratedS ?ratedS.
?end c:PowerTransformerEnd.ratedU ?ratedU.
?end c:PowerTransformerEnd.phaseAngleClock ?ang.
?end c:PowerTransformerEnd.connectionKind ?connraw.
bind(strafter(str(?connraw),"WindingConnection.") as ?conn)
?end c:TransformerEnd.Terminal ?trm.
?trm c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
}
ORDER BY ?pname ?enum
"""% self.feeder_mrid
results = self.gad.query_data(PowerTransformerEnd_QUERY)
bindings = results['data']['results']['bindings']
pte = []
for obj in bindings:
pte.append({k:v['value'] for (k, v) in obj.items()})
# output = pd.DataFrame(list_of_dicts)
TransformerTank_QUERY = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?pname ?vgrp ?enum ?bus ?ratedS ?ratedU WHERE {
VALUES ?fdrid {"%s"} # 9500 node
?p r:type c:PowerTransformer.
?p c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?p c:IdentifiedObject.name ?pname.
?p c:PowerTransformer.vectorGroup ?vgrp.
?t c:TransformerTank.PowerTransformer ?p.
?asset c:Asset.PowerSystemResources ?t.
?asset c:Asset.AssetInfo ?inf.
?inf c:IdentifiedObject.name ?xfmrcode.
?end c:TransformerTankEnd.TransformerTank ?t.
?end c:TransformerTankEnd.phases ?phsraw.
bind(strafter(str(?phsraw),"PhaseCode.") as ?phs)
?end c:TransformerEnd.endNumber ?enum.
?end c:TransformerEnd.Terminal ?trm.
?trm c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus.
?asset c:Asset.PowerSystemResources ?t.
?asset c:Asset.AssetInfo ?tinf.
?einf c:TransformerEndInfo.TransformerTankInfo ?tinf.
?einf c:TransformerEndInfo.endNumber ?enum.
?einf c:TransformerEndInfo.ratedS ?ratedS.
?einf c:TransformerEndInfo.ratedU ?ratedU.
}
ORDER BY ?pname ?tname ?enum
"""% self.feeder_mrid
results = self.gad.query_data(TransformerTank_QUERY)
bindings = results['data']['results']['bindings']
tte = []
for obj in bindings:
tte.append({k:v['value'] for (k, v) in obj.items()})
all_Transformers = pte + tte
output = pd.DataFrame(all_Transformers)
return output
def query_der(self):
"""Get information on all kinds of DERs in the feeder."""
# Perform the query.
inv_der_QUERY = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?bus ?ratedS ?ratedU WHERE {
VALUES ?fdrid {"%s"}
?s r:type c:PowerElectronicsConnection.
?s c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?s c:IdentifiedObject.name ?name.
?s c:IdentifiedObject.mRID ?id.
?s c:PowerElectronicsConnection.ratedS ?ratedS.
        ?s c:PowerElectronicsConnection.ratedU ?ratedU.
?t c:Terminal.ConductingEquipment ?s.
?t c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
}
ORDER BY ?name
"""% self.feeder_mrid
results = self.gad.query_data(inv_der_QUERY)
bindings = results['data']['results']['bindings']
inv_der = []
for obj in bindings:
inv_der.append({k:v['value'] for (k, v) in obj.items()})
dermachine_QUERY = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?bus ?ratedS ?ratedU WHERE {
VALUES ?fdrid {"%s"}
?s r:type c:SynchronousMachine.
?s c:IdentifiedObject.name ?name.
?s c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?s c:SynchronousMachine.ratedS ?ratedS.
?s c:SynchronousMachine.ratedU ?ratedU.
?t c:Terminal.ConductingEquipment ?s.
?t c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
}
GROUP by ?name ?bus ?ratedS ?ratedU ?p ?q ?id ?fdrid
ORDER by ?name
"""% self.feeder_mrid
results = self.gad.query_data(dermachine_QUERY)
bindings = results['data']['results']['bindings']
machine_der = []
for obj in bindings:
machine_der.append({k:v['value'] for (k, v) in obj.items()})
all_der = inv_der + machine_der
output = | pd.DataFrame(all_der) | pandas.DataFrame |
from strategy.rebalance import get_relative_to_expiry_rebalance_dates, \
get_fixed_frequency_rebalance_dates, \
get_relative_to_expiry_instrument_weights
from strategy.calendar import get_mtm_dates
import pandas as pd
import pytest
from pandas.testing import assert_index_equal, assert_frame_equal
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key], check_names=False)
def test_tradeables_dates():
    # no CME holidays in this date range
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
exchanges = ["CME"]
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.date_range(
"2015-01-02", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with an adhoc holiday
holidays = [pd.Timestamp("2015-01-02")]
tradeable_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
exp_tradeable_dates = pd.date_range(
"2015-01-03", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with CME holiday (New Years day)
sd = pd.Timestamp("2015-01-01")
ed = pd.Timestamp("2015-01-02")
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.DatetimeIndex([pd.Timestamp("2015-01-02")])
assert_index_equal(tradeable_dates, exp_tradeable_dates)
def test_relative_to_expiry_rebalance_dates():
# each contract rolling individually, same offset
# change to ES and TY
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015TYH", "2015-02-27", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"]],
columns=["contract", "first_notice", "last_trade"]
)
offsets = -3
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-24", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling all monthly contracts together, same offset
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=True, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(["2015-01-02", "2015-02-24"])
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling each contract individually, different offset
offsets = {"ES": -3, "TY": -4}
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-23", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
def test_relative_to_expiry_weights():
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015ESU", "2015-09-18", "2015-09-18"],
["2015TYH", "2015-03-16", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"],
["2015TYU", "2015-08-31", "2015-09-21"]],
columns=["contract", "first_notice", "last_trade"]
)
# one generic and one product
dts = pd.date_range("2015-03-17", "2015-03-18", freq="B")
offsets = -3
root_gnrcs = {"ES": ["ES1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame(
[1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple products
dts = pd.date_range("2015-03-13", "2015-03-20", freq="B")
offsets = -1
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-20"), "2015ESM"),],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015TYH"),
(pd.Timestamp("2015-03-16"), "2015TYM"),
(pd.Timestamp("2015-03-17"), "2015TYM"),
(pd.Timestamp("2015-03-18"), "2015TYM"),
(pd.Timestamp("2015-03-19"), "2015TYM"),
(pd.Timestamp("2015-03-20"), "2015TYM"),],
names=("date", "contract")),
columns=["TY1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple generics
offsets = -1
dts = pd.date_range("2015-03-19", "2015-03-20", freq="B")
root_gnrcs = {"ES": ["ES1", "ES2"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESM"),
(pd.Timestamp("2015-03-20"), "2015ESM"),
(pd.Timestamp("2015-03-20"), "2015ESU")],
names=("date", "contract")),
columns=["ES1", "ES2"]
)
}
assert_dict_of_frames(wts, exp_wts)
# with dict of offsets
offsets = {"ES": -4, "TY": -1}
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
dts = pd.date_range("2015-03-13", "2015-03-17", freq="B")
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[( | pd.Timestamp("2015-03-13") | pandas.Timestamp |
import os,sys
import argparse
import pandas as pd
import timeit
import copy
import json
from utils import filter_columns_from_kb, query_equal_from_kb, query_unequal_from_kb
from revertible_string import RevertibleString
# babi6_filter_keys = ['@R_cuisine', '@R_location', '@R_price']
###
# BABI Function
###
class BABI7Lexicalizer():
# Constant definition
api_call_pattern = 'api_call'
delex_prefix = '@'
delex_keys = ['@address', '@area', '@food', '@location', '@phone', '@pricerange', '@postcode', '@type', '@id', '@name']
filter_keys = ['@food', '@area', '@pricerange']
    # Function to read the knowledge base as a pandas dataframe from the given path
@staticmethod
def read_knowledge_base(path):
kb_dict = json.load(open(path, 'r'))
for i in range(len(kb_dict)):
kb_item = kb_dict[i]
for k in kb_item.keys():
if(k == "postcode"):
kb_item[k] = kb_item[k].replace(".","").replace(",","").replace(" ","").lower()
else:
kb_item[k] = kb_item[k].replace(" ","_").lower()
kb = pd.DataFrame.from_records(kb_dict).fillna('<UNK>')
kb.columns = [f'@{column}' for column in kb.columns]
kb['@food'] = kb['@food'].fillna('international')
kb['@phone'] = kb['@phone'].fillna('01223_000000')
return kb
    # Function to read a knowledge base modifier and update the existing kb
@staticmethod
def modify_knowledge_base(kb, path):
raise NotImplementedError
# return kb
# Function to read dialogue from the given path
@staticmethod
def read_dialogue(template_path):
dialogues = []
dialogue = []
for line in open(template_path,'r').readlines():
if len(line) == 1: # Only \n
dialogues.append(dialogue)
dialogue = []
else:
first_space_index = line.index(' ')
turn_id = line[:first_space_index]
conv = line[first_space_index+1:].replace('\n','')
request, response = conv.split('\t')
dialogue.append((turn_id, RevertibleString(request), RevertibleString(response)))
return dialogues
# Function to generate metadata from all BABI dialogues
@staticmethod
def generate_metadata(dialogues):
        delexicalized_dialog_meta = [] # Buffer containing tuples of (dialogue, delex_to_chat_dict, delex_resolved_args_list, max_delex_index, query_max_delex_index)
for dialogue in dialogues:
            delex_to_chat_dict = { } # Dictionary mapping each recorded delexicalized word to the list of Chat objects containing it
delex_resolved_args_list = [] # List of all recorded delexicalized words in api_call that need to be resolved for generation
max_delex_index = 0
query_max_delex_index = 0
for turn_id, request, response in dialogue:
# Process request & response
for chat in [request, response]:
if BABI7Lexicalizer.delex_prefix in chat.str:
for delex_key in BABI7Lexicalizer.delex_keys:
                            # TODO: hardcoded; the max number of entities in babi_7 is only 7, should replace this with a count if possible
for i in range(1, 9):
recorded_delex_word = f'{delex_key}_{i}'
if recorded_delex_word in chat.str:
if recorded_delex_word not in delex_to_chat_dict:
delex_to_chat_dict[recorded_delex_word] = []
delex_to_chat_dict[recorded_delex_word].append(chat)
if max_delex_index < i:
max_delex_index = i
# If api_call
if response.str.startswith(BABI7Lexicalizer.api_call_pattern):
delex_words = response.str.split(' ')[1:]
delex_resolved_args = []
for delex_word in delex_words:
if delex_word.startswith(BABI7Lexicalizer.delex_prefix):
delex_resolved_args.append(delex_word)
index = int(delex_word[-1])
query_max_delex_index = max_delex_index
delex_resolved_args_list.append(delex_resolved_args)
# Add result to global metadata buffer
delexicalized_dialog_meta.append((dialogue, delex_to_chat_dict, delex_resolved_args_list, max_delex_index, query_max_delex_index))
return delexicalized_dialog_meta
# Generate knowledge base index function
@staticmethod
def generate_kb_index(kb):
possible_filter_keys_list = [ # TODO: FU**IN hardcoded combination
BABI7Lexicalizer.filter_keys, # 3 Keys
BABI7Lexicalizer.filter_keys[:2], BABI7Lexicalizer.filter_keys[1:], BABI7Lexicalizer.filter_keys[::2], # 2 Keys
[BABI7Lexicalizer.filter_keys[0]], [BABI7Lexicalizer.filter_keys[1]], [BABI7Lexicalizer.filter_keys[2]] # 1 Key
]
default_index = pd.DataFrame({'index':['_'],'filter_type':['_'],'num_entity':[kb.shape[0]],'kb':[kb]}).set_index('index')
index_kbs = [default_index]
for possible_filter_keys in possible_filter_keys_list:
possible_queries_df = kb[possible_filter_keys].drop_duplicates()
filter_type = '_'.join(possible_filter_keys)
index_keys = []
filter_types = []
kb_sizes = []
filtered_kbs = []
for row in possible_queries_df.to_dict('records'):
filters = [(attr,value) for attr, value in row.items()]
filtered_kb = query_equal_from_kb(kb, filters)
index_keys.append('_'.join([value for value in row.values()]))
kb_sizes.append(filtered_kb.shape[0])
filter_types.append(filter_type)
filtered_kbs.append(filtered_kb)
index_data = {'index':index_keys,'filter_type':filter_types,'num_entity':kb_sizes,'kb':filtered_kbs}
index_kbs.append( | pd.DataFrame(index_data) | pandas.DataFrame |
from data_science_layer.reporting.abstract_report import AbstractReport
from data_science_layer.pipeline.abstract_pipline import AbstractPipeline
import pkg_resources
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
class RegressorCurves(AbstractReport):
sub_folder = 'reports'
log_y = False
exp_y = False
def report(self, pipeline: AbstractPipeline):
# Set Directory path
folder = ''
path = pkg_resources.resource_filename('crcdal', 'cache/' + folder + '/' + self.sub_folder + '/')
pkg_resources.ensure_directory(path)
# Hist Train
fig, ax = plt.subplots(figsize=(40, 40))
pipeline.train.hist(bins=100, ax=ax)
fig.savefig(path + 'Hist_Train.png')
# Hist Test
fig, ax = plt.subplots(figsize=(40, 40))
pipeline.test.hist(bins=100, ax=ax)
fig.savefig(path + 'Hist_Test.png')
# Feature Results
nrows = len(pipeline._ml_models)
nrows = 2 if nrows == 1 else nrows
ncols = 2
ncols = 2 ** pipeline.test_y.shape[1] if pipeline.test_y.shape[1] > 1 else ncols
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(40, 10 * nrows))
fig2, axes2 = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, figsize=(40, 10 * nrows))
for i, model in enumerate(pipeline.get_models()):
name = model.short_name
preds_y_train, _ = model.predict(pipeline.train)
preds_y_test, _ = model.predict(pipeline.test)
preds_y_train = pd.DataFrame(preds_y_train)
preds_y_test = | pd.DataFrame(preds_y_test) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import dask.dataframe as dd
from dask_sql.utils import ParsingException
def test_select(c, df):
result_df = c.sql("SELECT * FROM df")
result_df = result_df.compute()
assert_frame_equal(result_df, df)
def test_select_alias(c, df):
result_df = c.sql("SELECT a as b, b as a FROM df")
result_df = result_df.compute()
expected_df = pd.DataFrame(index=df.index)
expected_df["b"] = df.a
expected_df["a"] = df.b
assert_frame_equal(result_df[["a", "b"]], expected_df[["a", "b"]])
def test_select_column(c, df):
result_df = c.sql("SELECT a FROM df")
result_df = result_df.compute()
assert_frame_equal(result_df, df[["a"]])
def test_select_different_types(c):
expected_df = pd.DataFrame(
{
"date": pd.to_datetime(["2022-01-21 17:34", "2022-01-21", "17:34", pd.NaT]),
"string": ["this is a test", "another test", "äölüć", ""],
"integer": [1, 2, -4, 5],
"float": [-1.1, np.NaN, pd.NA, np.sqrt(2)],
}
)
c.create_table("df", expected_df)
df = c.sql(
"""
SELECT *
FROM df
"""
)
df = df.compute()
assert_frame_equal(df, expected_df)
def test_select_expr(c, df):
result_df = c.sql("SELECT a + 1 AS a, b AS bla, a - 1 FROM df")
result_df = result_df.compute()
expected_df = pd.DataFrame(
{"a": df["a"] + 1, "bla": df["b"], '"df"."a" - 1': df["a"] - 1,}
)
assert_frame_equal(result_df, expected_df)
def test_select_of_select(c, df):
result_df = c.sql(
"""
SELECT 2*c AS e, d - 1 AS f
FROM
(
SELECT a - 1 AS c, 2*b AS d
FROM df
) AS "inner"
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame({"e": 2 * (df["a"] - 1), "f": 2 * df["b"] - 1})
assert_frame_equal(result_df, expected_df)
def test_select_of_select_with_casing(c, df):
result_df = c.sql(
"""
SELECT AAA, aaa, aAa
FROM
(
SELECT a - 1 AS aAa, 2*b AS aaa, a + b AS AAA
FROM df
) AS "inner"
"""
)
result_df = result_df.compute()
expected_df = pd.DataFrame(
{"AAA": df["a"] + df["b"], "aaa": 2 * df["b"], "aAa": df["a"] - 1}
)
assert_frame_equal(result_df, expected_df)
def test_wrong_input(c):
with pytest.raises(ParsingException):
c.sql("""SELECT x FROM df""")
def test_timezones(c, datetime_table):
result_df = c.sql(
"""
SELECT * FROM datetime_table
"""
)
result_df = result_df.compute()
| assert_frame_equal(result_df, datetime_table) | pandas.testing.assert_frame_equal |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
    Read the messages and categories data and combine them into one dataframe.

    Parameters:
        messages_filepath (str): Path to the disaster messages file. This should be a csv file.
        categories_filepath (str): Path to the categories file. This should be a csv file.

    Returns:
        df (pandas dataframe): The combined data.
"""
# Read data
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
# Merge the data by common key "id"
df = | pd.merge(messages, categories, left_on='id', right_on='id', how='inner') | pandas.merge |
import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
import xlsxwriter
import pandas as pd # Excel
import struct # Binary writing
import h5py
import time
import scipy.signal
import scipy.ndimage
import scipy.io as sio # Read .mat files
from scipy.ndimage.filters import convolve,correlate,median_filter
import sklearn.metrics as skmet
import sklearn.decomposition as skdec
import sklearn.linear_model as sklin
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import normalize
from sklearn import svm
from sklearn import neighbors
#Regression
def regress(features,score):
pred = []
#Leave one out split
loo = LeaveOneOut()
for trainidx, testidx in loo.split(features):
#Indices
X_train, X_test = features[trainidx], features[testidx]
X_test -= X_train.mean(0)
X_train -= X_train.mean(0)
Y_train, Y_test = score[trainidx], score[testidx]
#Linear regression
regr = sklin.Ridge(alpha=1)
regr.fit(X_train,Y_train)
#Predicted score
pred.append(regr.predict(X_test))
return np.array(pred)
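#Minimal usage sketch (synthetic data, illustration only): one leave-one-out ridge
#prediction per observation.
def _example_regress():
    rng = np.random.RandomState(0)
    features = rng.rand(20, 5)
    score = rng.rand(20)
    pred = regress(features, score)
    print(pred.shape)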
#Logistic regression
def logreg(features,score):
pred = []
#Leave one out split
loo = LeaveOneOut()
for trainidx, testidx in loo.split(features):
#Indices
X_train, X_test = features[trainidx], features[testidx]
X_test -= X_train.mean(0)
X_train -= X_train.mean(0)
Y_train, Y_test = score[trainidx], score[testidx]
#Linear regression
regr = sklin.LogisticRegression(solver='newton-cg',max_iter=1000)
regr.fit(X_train,Y_train)
#Predicted score
P = regr.predict_proba(X_test)
pred.append(P)
pred = np.array(pred)
pred = pred[:,:,1]
return pred.flatten()
#Scikit PCA
def ScikitPCA(features,ncomp):
pca = skdec.PCA(n_components=ncomp, svd_solver='full')
score = pca.fit(features).transform(features)
return pca, score
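#Hedged usage sketch for ScikitPCA() above (synthetic data):
#
#   feats = np.random.rand(30, 10)        # 30 observations x 10 features
#   pca, comps = ScikitPCA(feats, ncomp=3)
#   print(comps.shape)                    # -> (30, 3)
#   print(pca.explained_variance_ratio_)  # variance share captured per component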
#Principal component analysis
def PCA(features,ncomp):
#Feature dimension, x=num variables,N=num observations
x,N = np.shape(features)
#Mean feature
mean_f = np.mean(features,axis=1)
#Centering
centrd = np.zeros((x,N))
for k in range(N):
centrd[:,k] = features[:,k]-mean_f
#PCs from covariance matrix if N>=x, svd otherwise
if False:
#Covariance matrix
Cov = np.zeros((x,x))
f = np.zeros((x,1))
for k in range(N):
f[:,0] = centrd[:,k]
Cov = Cov+1/N*np.matmul(f,f.T)
#Eigen values
E,V = np.linalg.eig(Cov)
#Sort eigenvalues and vectors to descending order
idx = np.argsort(E)[::-1]
V = np.matrix(V[:,idx])
E = E[idx]
for k in range(ncomp):
s = np.matmul(V[:,k].T,centrd).T
try:
score = np.concatenate((score,s),axis=1)
except NameError:
score = s
p = V[:,k]
try:
pcomp = np.concatenate((pcomp,p),axis=1)
except NameError:
pcomp = p
else:
#PCA with SVD
u,s,v = np.linalg.svd(centrd,compute_uv=1)
pcomp = v[:,:ncomp]
# Save results
writer = pd.ExcelWriter(r'C:\Users\sarytky\Desktop\trials' + r'\PCA_test.xlsx')
df1 = pd.DataFrame(centrd)
df1.to_excel(writer, sheet_name='dataAdjust')
df2 = pd.DataFrame(u)
df2.to_excel(writer, sheet_name='u')
df3 = pd.DataFrame(s)
df3.to_excel(writer, sheet_name='s')
df4 = | pd.DataFrame(v) | pandas.DataFrame |
""" """
import pandas
import numpy as np
from astropy.io import fits
from astropy.nddata import bitmask
from .io import PS1Calibrators, GaiaCalibrators
from . import tools
import dask
import dask.array as da
from dask.array.core import Array as DaskArray
from dask.delayed import Delayed
ZTF_FILTERS = {"ztfg":{"wave_eff":4813.97, "fid":1},
"ztfr":{"wave_eff":6421.81, "fid":2},
"ztfi":{"wave_eff":7883.06, "fid":3}
}
from .astrometry import WCSHolder
print("ztfimg.image is DEPRECATED. See ztfimg.science (dasked version of it) ")
class ZTFImage( WCSHolder ):
""" """
SHAPE = 3080, 3072
BITMASK_KEY = [ "tracks","sexsources","lowresponsivity","highresponsivity",
"noisy","ghosts","spillage","spikes","saturated",
"dead","nan","psfsources","brightstarhalo"]
def __init__(self, imagefile=None, maskfile=None):
""" """
if imagefile is not None:
self.load_data(imagefile)
if maskfile is not None:
self.load_mask(maskfile)
@classmethod
def fetch_local(cls):
""" """
print("To be done")
# =============== #
# Methods #
# =============== #
def query_associated_data(self, suffix=None, source="irsa", which="science",
verbose=False, **kwargs):
""" """
from ztfquery import buildurl
return getattr(buildurl,f"filename_to_{which}url")(self._filename, source=source,
suffix=suffix,
verbose=False, **kwargs)
# -------- #
# LOADER #
# -------- #
def load_data(self, imagefile, **kwargs):
""" """
self._filename = imagefile
self._data = fits.getdata(imagefile, **kwargs)
self._header = fits.getheader(imagefile, **kwargs)
def load_mask(self, maskfile, **kwargs):
""" """
self._mask = fits.getdata(maskfile,**kwargs)
self._maskheader = fits.getheader(maskfile,**kwargs)
def load_wcs(self, header=None):
""" """
if header is None:
header = self.header
super().load_wcs(header)
def load_source_background(self, r=5, setit=True, datamasked=None, **kwargs):
"""
kwargs goes to """
from sep import Background
if datamasked is None:
if self.sources is None:
from_sources = self.extract_sources(update=False, **kwargs)
else:
from_sources = self.sources
datamasked = self.get_data(applymask=True, from_sources=from_sources,
r=r, rmbkgd=False)
self._sourcebackground = Background(datamasked.byteswap().newbyteorder())
if setit:
self.set_background(self._sourcebackground.back())
def load_ps1_calibrators(self, setxy=True):
""" """
self.set_catalog( self.get_ps1_calibrators(setxy=setxy), "ps1cat")
def load_gaia_calibrators(self, setxy=True):
""" """
self.set_catalog( self.get_gaia_calibrators(setxy=setxy), "gaia")
# -------- #
# SETTER #
# -------- #
def set_background(self, background, cleardataclean=True):
"""
Parameters
----------
background: [array/float/str]
Could be:
array or float: this will be the background
str: this will call get_background(method=background)
"""
if type(background) == str:
self._background = self.get_background(method=background)
else:
self._background = background
if cleardataclean:
self._dataclean = None
def set_catalog(self, dataframe, label):
""" """
if "ra" not in dataframe.columns and "x" not in dataframe.columns:
raise ValueError("The dataframe must contains either (x,y) coords or (ra,dec) coords")
if "ra" in dataframe.columns and "x" not in dataframe.columns:
x,y = self.radec_to_xy(dataframe["ra"], dataframe["dec"])
dataframe["x"] = x
dataframe["y"] = y
if "x" in dataframe.columns and "ra" not in dataframe.columns:
ra,dec = self.xy_to_radec(dataframe["x"], dataframe["y"])
dataframe["ra"] = ra
dataframe["dec"] = dec
if "u" not in dataframe.columns:
u, v = self.radec_to_uv(dataframe["ra"], dataframe["dec"])
dataframe["u"] = u
dataframe["v"] = v
self.catalogs.set_catalog(dataframe, label)
# -------- #
# GETTER #
# -------- #
def _setxy_to_cat_(self, cat, drop_outside=True, pixelbuffer=10):
""" """
x,y = self.radec_to_xy(cat["ra"], cat["dec"])
u,v = self.radec_to_uv(cat["ra"], cat["dec"])
cat["x"] = x
cat["y"] = y
cat["u"] = u
cat["v"] = v
if drop_outside:
ymax, xmax = self.shape
cat = cat[cat["x"].between(+pixelbuffer, ymax-pixelbuffer) & \
cat["y"].between(+pixelbuffer, xmax-pixelbuffer)]
return cat
def get_psfcat(self, show_progress=False, **kwargs):
"""
psf-fit photometry catalog generated by the ztf-pipeline
"""
from ztfquery import io
from astropy.table import Table
psffilename = io.get_file(self.filename, suffix="psfcat.fits",
show_progress=show_progress, **kwargs)
data = Table(fits.open(psffilename)[1].data).to_pandas().set_index("sourceid")
# Not the same standard as calibrator cats.
data[["xpos","ypos"]] -= 1
return data.rename({"xpos":"x", "ypos":"y"}, axis=1)
def get_sexcat(self, show_progress=False, astable=False,**kwargs):
"""
nested-aperture photometry catalog generated by the ztf-pipeline
        careful, nested apertures (MAG_APER, FLUX_APER and associated errors) are dropped when converting to pandas.
"""
from ztfquery import io
from astropy.table import Table
psffilename = io.get_file(self.filename, suffix="sexcat.fits", show_progress=show_progress, **kwargs)
tbl = Table(fits.open(psffilename)[1].data)#.to_pandas().set_index("sourceid")
if astable:
return tbl
names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]
return tbl[names].to_pandas().set_index("NUMBER")
    def get_daophot_psf(self, **kwargs):
        """
        PSF estimate at science image center as a FITS image generated by the ztf-pipeline
        """
        from ztfquery import io
        psffile = io.get_file(self.filename, "sciimgdaopsfcent.fits", show_progress=False)
        return fits.getdata(psffile)
def get_catalog(self, calibrator=["gaia","ps1"], extra=["psfcat"], isolation=20, seplimit=0.5, **kwargs):
""" **kwargs goes to get_calibrators """
from .catalog import match_and_merge
cal = self.get_calibrators(calibrator, isolation=isolation, seplimit=seplimit, **kwargs)
if "gaia" in np.atleast_1d(calibrator).tolist():
onleft = "Source"
else:
raise NotImplementedError("calibrator should contain gaia in the current implementation.")
extra = np.atleast_1d(extra).tolist()
if "psfcat" in extra:
psfcat = self.get_psfcat()
return match_and_merge(cal, psfcat, "Source", mergehow="left", suffixes=('', '_psfcat'), seplimit=seplimit)
return cal
def get_calibrators(self, which=["gaia","ps1"],
setxy=True, drop_outside=True, drop_namag=True,
pixelbuffer=10, isolation=None, mergehow="inner", seplimit=0.5, **kwargs):
""" get a DataFrame containing the requested calibrator catalog(s).
        If several catalogs are given, they will be matched and merged into a single dataframe (inner join by default).
= implemented: gaia, ps1 =
Returns
------
DataFrame
"""
which = np.atleast_1d(which)
if len(which)==0:
raise ValueError("At least 1 catalog must be given")
# Single Catalog
if len(which) == 1:
if which[0] == "gaia":
return self.get_gaia_calibrators(setxy=setxy, drop_namag=drop_namag, drop_outside=drop_outside,
pixelbuffer=pixelbuffer,
isolation=isolation, **kwargs)
elif which[0] == "ps1":
return self.get_ps1_calibrators(setxy=setxy, drop_outside=drop_outside, pixelbuffer=pixelbuffer, **kwargs)
else:
raise ValueError(f"Only ps1 or gaia calibrator catalog have been implemented, {which} given.")
# Two Catalogs
if len(which) == 2:
if which.tolist() in [["gaia","ps1"], ["ps1","gaia"]]:
from .catalog import match_and_merge
catps1 = self.get_ps1_calibrators(setxy=setxy,
drop_outside=drop_outside, pixelbuffer=pixelbuffer, **kwargs)
catgaia = self.get_gaia_calibrators(setxy=setxy, drop_namag=drop_namag,isolation=isolation,
drop_outside=drop_outside, pixelbuffer=pixelbuffer, **kwargs)
return match_and_merge(catgaia.reset_index(),
catps1.reset_index(),
"Source", suffixes=('', '_ps1'), mergehow=mergehow,
seplimit=seplimit)
else:
raise ValueError(f"Only ps1 and gaia calibrators catalog have been implemented, {which} given.")
raise ValueError(f"Only single or pair or catalog (ps1 and/or gaia) been implemented, {which} given.")
def get_ps1_calibrators(self, setxy=True, drop_outside=True, pixelbuffer=10, **kwargs):
""" """
# remark: radec is going to be used only the fieldid is not already downloaded.
ps1cat = PS1Calibrators.fetch_data(self.rcid, self.fieldid, radec=self.get_center(system="radec"), **kwargs)
# Set mag as the current band magnitude
ps1cat['mag'] = ps1cat["%smag"%self.filtername[-1]]
ps1cat['e_mag'] = ps1cat["e_%smag"%self.filtername[-1]]
if setxy and ("ra" in ps1cat.columns and "x" not in ps1cat.columns):
ps1cat = self._setxy_to_cat_(ps1cat, drop_outside=drop_outside, pixelbuffer=pixelbuffer)
return ps1cat
def get_gaia_calibrators(self, setxy=True, drop_namag=True, drop_outside=True, pixelbuffer=10,
isolation=None, **kwargs):
""" **kwargs goes to GaiaCalibrators (dl_wait for instance)
isolation: [None or positive float] -optional-
self isolation limit (in arcsec). A True / False flag will be added to the catalog
Returns
-------
DataFrame
"""
cat = GaiaCalibrators.fetch_data(self.rcid, self.fieldid, radec=self.get_center(system="radec"), **kwargs)
if drop_namag:
cat = cat[~ | pandas.isna(cat[["gmag","rpmag","bpmag"]]) | pandas.isna |
from eagles.Supervised.utils import plot_utils as pu
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso, LogisticRegression
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from scipy import stats
def select_features(
X=None,
y=None,
methods=[],
problem_type="clf",
model_pipe=None,
imp_thresh=0.005,
corr_thresh=0.7,
bin_fts=None,
dont_drop=None,
random_seed=None,
n_jobs=None,
plot_ft_importance=False,
plot_ft_corr=False,
):
"""
Function to reduce feature set size
Expects:
X - pandas df containing the feature columns
    y - pandas series containing the outcomes
    imp_thresh - min importance threshold for the rf importance (features below thresh are cut)
    corr_thresh - correlation threshold where fts above thresh are cut
    bin_fts - list of col names with binarized features (excluded from the correlation check)
    plot_ft_importance - boolean, if true then plots the feature importances
    dont_drop - list of col names we don't want to drop regardless of corr or importance
Returns: list of included features, list of dropped features
"""
if len(methods) == 0:
print("NO SELECT FEATURES METHODS PASSED")
return
# get the initial features
ft_cols = list(X.columns[:])
print("Init number of features: " + str(len(ft_cols)) + " \n")
imp_drop = []
lin_drop = []
corr_drop = []
if dont_drop is None:
dont_drop = []
if "correlation" in methods:
# correlation drop
corr_fts = [x for x in X.columns if x not in bin_fts]
correlations = X[corr_fts].corr()
if plot_ft_corr:
pu.plot_feature_correlations(
df=X[corr_fts].copy(deep=True),
plot_title="Feature Correlation Pre-Drop",
)
        upper = correlations.where(
            np.triu(np.ones(correlations.shape), k=1).astype(bool)
        )
corr_drop = [
column for column in upper.columns if any(upper[column].abs() > corr_thresh)
]
# drop the correlation features first then fit the models
print("Features dropping due to high correlation: " + str(corr_drop) + " \n")
ft_cols = [x for x in ft_cols if (x not in corr_drop) or (x in dont_drop)]
X = X[ft_cols].copy(deep=True)
# Model importance
X_train, X_test, y_train, y_test = train_test_split(
X[ft_cols], y, test_size=0.2, random_state=random_seed
)
if "rf_importance" in methods:
if problem_type == "clf":
forest = RandomForestClassifier(
n_estimators=200, random_state=random_seed, n_jobs=n_jobs
)
else:
forest = RandomForestRegressor(
n_estimators=200, random_state=random_seed, n_jobs=n_jobs
)
if model_pipe:
tmp_pipe = clone(model_pipe)
tmp_pipe.steps.append(["mod", forest])
forest = clone(tmp_pipe)
forest.fit(X_train, y_train)
if model_pipe:
forest = forest.named_steps["mod"]
rf_importances = forest.feature_importances_
ftImp = {"Feature": ft_cols, "Importance": rf_importances}
ftImp_df = pd.DataFrame(ftImp)
ftImp_df.sort_values(["Importance"], ascending=False, inplace=True)
imp_drop = list(ftImp_df[ftImp_df["Importance"] < imp_thresh]["Feature"])
print("Features dropping from low importance: " + str(imp_drop) + " \n")
if plot_ft_importance:
pu.plot_feature_importance(
ft_df=ftImp_df,
mod_type=type(forest).__name__,
plot_title="RF Feature Selection Importance",
)
if "regress" in methods:
if problem_type == "clf":
lin_mod = LogisticRegression(
penalty="l1", solver="liblinear", random_state=random_seed
)
else:
lin_mod = Lasso(random_state=random_seed)
if model_pipe:
tmp_pipe = clone(model_pipe)
tmp_pipe.steps.append(["mod", lin_mod])
lin_mod = clone(tmp_pipe)
lin_mod.fit(X_train, y_train)
if model_pipe:
lin_mod = lin_mod.named_steps["mod"]
if problem_type == "clf":
tmp = pd.DataFrame({"Feature": ft_cols, "Coef": lin_mod.coef_[0]})
else:
tmp = pd.DataFrame({"Feature": ft_cols, "Coef": lin_mod.coef_})
lin_drop = list(tmp["Feature"][tmp["Coef"] == 0])
print("Features dropping from l1 regression: " + str(lin_drop) + " \n")
if plot_ft_importance:
pu.plot_feature_importance(
ft_df=tmp,
mod_type=type(lin_mod).__name__,
plot_title="Logistic l1 Feature Selection Coefs",
)
# get the final drop and feature sets
drop_fts = list(set(imp_drop + lin_drop + corr_drop))
sub_fts = [col for col in ft_cols if (col not in drop_fts) or (col in dont_drop)]
print("Final number of fts : " + str(len(sub_fts)) + "\n \n")
print("Final features: " + str(sub_fts) + "\n \n")
print("Dropped features: " + str(drop_fts) + "\n \n")
return sub_fts, drop_fts
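# Hedged usage sketch for select_features() above (synthetic data; the column names are
# made up for illustration). numpy and pandas are already imported at the top of this module.
#
#   X = pd.DataFrame(np.random.rand(100, 4), columns=["f1", "f2", "f3", "f4"])
#   X["f4_bin"] = (X["f4"] > 0.5).astype(int)
#   y = pd.Series(np.random.randint(0, 2, 100))
#   kept, dropped = select_features(
#       X=X,
#       y=y,
#       methods=["correlation", "rf_importance"],
#       problem_type="clf",
#       bin_fts=["f4_bin"],
#       random_seed=42,
#   )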
def create_bin_table(df=None, bins=None, bin_col=None, actual_col=None):
"""
Function to generate the bin tables with percents
    Expects: df - pandas df containing the actual outcome col (actual_col) and the col to be binned
             bins - default to prob taken bins unless passed list of bin steps i.e. [x/100 for x in range(-5,105,5)]
             bin_col - name of the col to be binned
             actual_col - name of the col with the actual outcomes
    Returns: [bin table dataframe, pearson correlation between bin rank and percent actual]
"""
# Generate the bin col name
bin_col_name = bin_col + "_bin"
# Generate the list of bins (go by 5%)
    # default to the prob taken bins: include -5 so that anything at 0 falls into a bin, and go
    # above 100 so that values from 95 to 100 are included
if bins is None:
bin_list = [x / 100 for x in range(-5, 105, 5)]
else:
bin_list = bins
# create the bins
df[bin_col_name] = pd.cut(df[bin_col], bin_list)
# get the counts for the number of obs in each bin and the percent taken in each bin
cnts = df[bin_col_name].value_counts().reset_index()
cnts.columns = [bin_col_name, "count"]
# Get the percent ivr per bin
percs = df.groupby(by=bin_col_name)[actual_col].mean().reset_index()
percs.columns = [bin_col_name, "percent_actual"]
# combine the counts and the percents, sort the table by bin and write the table out
wrt_table = cnts.merge(
percs, left_on=bin_col_name, right_on=bin_col_name, how="inner"
)
wrt_table.sort_values(by=bin_col_name, inplace=True)
    # calc the correlation between probability bin rank and the percent actual
    # assumes the table is in order at this point
if wrt_table.isnull().values.any():
return wrt_table, np.nan
else:
ranks = [i for i in range(wrt_table.shape[0])]
corr, p = stats.pearsonr(ranks, wrt_table["percent_actual"])
return [wrt_table, corr]
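# Hedged usage sketch for create_bin_table() above (synthetic data):
#
#   tmp = pd.DataFrame({"prob": np.random.rand(500),
#                       "outcome": np.random.randint(0, 2, 500)})
#   table, corr = create_bin_table(df=tmp, bins=None, bin_col="prob", actual_col="outcome")
#   print(table)   # count and percent_actual per 5% probability bin
#   print(corr)    # pearson correlation between bin rank and percent_actual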
def get_feature_importances(mod_name=None, mod=None, features=None):
features = ["ft_" + str(ft) if isinstance(ft, int) else ft for ft in features]
if (
("RandomForest" in mod_name)
or ("GradientBoosting" in mod_name)
or ("DecisionTree" in mod_name)
or ("ExtraTrees" in mod_name)
):
importance_values = mod.feature_importances_
ftImp = {"Feature": features, "Importance": importance_values}
ftImp_df = pd.DataFrame(ftImp)
# display_imp is true then plot the importance values of the features
ftImp_df = ftImp_df.sort_values(["Importance"], ascending=False).reset_index(
drop=True
)
return ftImp_df
elif ("Regression" in mod_name) or (
mod_name in ["Lasso", "ElasticNet", "PoissonRegressor"]
):
if mod_name == "LogisticRegression":
tmp = pd.DataFrame({"Feature": features, "Coef": mod.coef_[0]})
else:
tmp = pd.DataFrame({"Feature": features, "Coef": mod.coef_})
tmp["Abs_Coef"] = tmp["Coef"].abs()
tmp = tmp.sort_values(["Abs_Coef"], ascending=False).reset_index(drop=True)
tmp = tmp[["Feature", "Coef"]].copy(deep=True)
return tmp
return
def _unpack_voting_models(mod, mod_type, X, disp, num_top_fts):
ft_imp_df = pd.DataFrame()
for c in mod.estimators_:
if type(c).__name__ == "Pipeline":
if "feature_selection" in c.named_steps:
inds = [mod.named_steps["feature_selection"].get_support()][0]
tmp_fts = X.columns[inds]
else:
tmp_fts = list(X.columns)
tmp_mod = c.named_steps[mod_type]
tmp_ft_imp_df = get_feature_importances(
mod_name=type(c.named_steps[mod_type]).__name__,
mod=tmp_mod,
features=tmp_fts,
)
if disp:
pu.plot_feature_importance(
ft_df=tmp_ft_imp_df,
mod_name=type(c.named_steps[mod_type]).__name__,
num_top_fts=num_top_fts,
plot_title=type(c.named_steps[mod_type]).__name__
+ " Model Importance",
)
else:
tmp_ft_imp_df = get_feature_importances(
mod_name=type(c).__name__, mod=c, features=list(X.columns)
)
if disp:
pu.plot_feature_importance(
ft_df=tmp_ft_imp_df,
mod_name=type(c).__name__,
num_top_fts=num_top_fts,
plot_title=type(c).__name__ + " Model Importance",
)
tmp_ft_imp_df.columns = ["features", "value"]
if type(c).__name__ == "Pipeline":
tmp_mod_name = type(c.named_steps[mod_type]).__name__
else:
tmp_mod_name = type(c).__name__
tmp_ft_imp_df["features"] = tmp_mod_name + "_" + tmp_ft_imp_df["features"]
ft_imp_df = | pd.concat([ft_imp_df, tmp_ft_imp_df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""User functions to streamline working with grids of OLS and LME
model summaries and sets of models."""
import itertools
import copy
import warnings
import re
from cycler import cycler as cy
from collections import defaultdict
import pprint as pp
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import fitgrid
# enforce some common structure for summary dataframes
# scraped out of different fit objects.
# _TIME is a place holder and replaced by the grid.time value on the fly
INDEX_NAMES = ['_TIME', 'model', 'beta', 'key']
# each model, beta combination has all these values,
# some are per-beta, some are per-model
KEY_LABELS = [
'2.5_ci',
'97.5_ci',
'AIC',
'DF',
'Estimate',
'P-val',
'SE',
'SSresid',
'T-stat',
'has_warning',
'logLike',
'sigma2',
'warnings',
]
# special treatment for per-model values ... broadcast to all params
PER_MODEL_KEY_LABELS = [
'AIC',
'SSresid',
'has_warning',
'warnings',
'logLike',
'sigma2',
]
def summarize(
epochs_fg,
modeler,
LHS,
RHS,
parallel=False,
n_cores=2,
quiet=False,
**kwargs,
):
"""Fit the data with one or more model formulas and return summary information.
Convenience wrapper, useful for keeping memory use manageable when
gathering betas and fit measures for a stack of models.
Parameters
----------
epochs_fg : fitgrid.epochs.Epochs
as returned by `fitgrid.epochs_from_dataframe()` or
`fitgrid.from_hdf()`, *NOT* a `pandas.DataFrame`.
modeler : {'lm', 'lmer'}
class of model to fit, `lm` for OLS, `lmer` for linear mixed-effects.
Note: the RHS formula language must match the modeler.
LHS : list of str
the data columns to model
RHS : model formula or list of model formulas to fit
see the Python package `patsy` docs for `lm` formula language
and the R library `lme4` docs for the `lmer` formula language.
parallel : bool
If True, model fitting is distributed to multiple cores
n_cores : int
number of cores to use. See what works, but golden rule if running
on a shared machine.
quiet : bool
Show progress bar default=True
**kwargs : key=value arguments passed to the modeler, optional
Returns
-------
summary_df : `pandas.DataFrame`
indexed by `timestamp`, `model_formula`, `beta`, and `key`,
where the keys are `ll.l_ci`, `uu.u_ci`, `AIC`, `DF`, `Estimate`,
`P-val`, `SE`, `T-stat`, `has_warning`, `logLike`.
Examples
--------
>>> lm_formulas = [
'1 + fixed_a + fixed_b + fixed_a:fixed_b',
'1 + fixed_a + fixed_b',
        '1 + fixed_a',
        '1 + fixed_b',
'1',
]
>>> lm_summary_df = fitgrid.utils.summarize(
epochs_fg,
'lm',
LHS=['MiPf', 'MiCe', 'MiPa', 'MiOc'],
        RHS=lm_formulas,
parallel=True,
n_cores=4
)
>>> lmer_formulas = [
'1 + fixed_a + (1 + fixed_a | random_a) + (1 | random_b)',
'1 + fixed_a + (1 | random_a) + (1 | random_b)',
'1 + fixed_a + (1 | random_a)',
]
>>> lmer_summary_df = fitgrid.utils.summarize(
epochs_fg,
'lmer',
LHS=['MiPf', 'MiCe', 'MiPa', 'MiOc'],
RHS=lmer_formulas,
parallel=True,
n_cores=12,
REML=False
)
"""
warnings.warn(
'fitgrid summaries are in early days, subject to change', FutureWarning
)
# modicum of guarding
msg = None
if isinstance(epochs_fg, pd.DataFrame):
msg = (
"Convert dataframe to fitgrid epochs with "
"fitgrid.epochs_from_dataframe()"
)
elif not isinstance(epochs_fg, fitgrid.epochs.Epochs):
msg = f"epochs_fg must be a fitgrid.Epochs not {type(epochs_fg)}"
if msg is not None:
raise TypeError(msg)
    # select modeler
if modeler == 'lm':
_modeler = fitgrid.lm
_scraper = _lm_get_summaries_df
elif modeler == 'lmer':
_modeler = fitgrid.lmer
_scraper = _lmer_get_summaries_df
else:
raise ValueError("modeler must be 'lm' or 'lmer'")
# promote RHS scalar str to singleton list
RHS = np.atleast_1d(RHS).tolist()
# loop through model formulas fitting and scraping summaries
summaries = []
for _rhs in RHS:
summaries.append(
_scraper(
_modeler(
epochs_fg,
LHS=LHS,
RHS=_rhs,
parallel=parallel,
n_cores=n_cores,
quiet=quiet,
**kwargs,
)
)
)
summary_df = pd.concat(summaries)
_check_summary_df(summary_df, epochs_fg)
return summary_df
# ------------------------------------------------------------
# private-ish summary helpers for scraping summary info from fits
# ------------------------------------------------------------
def _check_summary_df(summary_df, fg_obj):
"""check summary df structure, and against the fitgrid object if any"""
# fg_obj can be fitgrid.Epochs, LMGrid or LMERGrid, they all have a time attribute
# check for fatal error conditions
error_msg = None # set on error
# check summary
if not isinstance(summary_df, pd.DataFrame):
error_msg = "summary data is not a pandas.DataFrame"
elif not len(summary_df):
error_msg = "summary data frame is empty"
elif not summary_df.index.names[1:] == INDEX_NAMES[1:]:
# first name is _TIME, set from user epochs data
error_msg = (
f"summary index names do not match INDEX_NAMES: {INDEX_NAMES}"
)
elif not all(summary_df.index.levels[-1] == KEY_LABELS):
error_msg = (
f"summary index key levels dot match KEY_LABELS: {KEY_LABELS}"
)
else:
# TBD
pass
# does summary of an object agree with its object?
if fg_obj:
assert any(
[
isinstance(fg_obj, fgtype)
for fgtype in [
fitgrid.epochs.Epochs,
fitgrid.fitgrid.LMFitGrid,
fitgrid.fitgrid.LMERFitGrid,
]
]
)
if not summary_df.index.names == [fg_obj.time] + INDEX_NAMES[1:]:
error_msg = (
f"summary fitgrid object index mismatch: "
f"summary_df.index.names: {summary_df.index.names} "
f"fitgrd object: {[fg_obj.time] + INDEX_NAMES[1:]}"
)
if error_msg:
raise ValueError(error_msg)
# check for non-fatal issues
if "warnings" not in summary_df.index.unique("key"):
msg = (
"Summaries are from fitgrid version < 0.5.0, use that version or re-fit the"
f" models with this one fitgrid.utils.summarize() v{fitgrid.__version__}"
)
raise RuntimeError(msg)
def _update_INDEX_NAMES(lxgrid, index_names):
"""use the grid time column name for the summary index"""
assert index_names[0] == '_TIME'
_index_names = copy.copy(index_names)
_index_names[0] = lxgrid.time
return _index_names
def _stringify_lmer_warnings(fg_lmer):
"""create grid w/ _ separated string of lme4::lmer warning list items, else "" """
warning_grids = fitgrid.utils.lmer.get_lmer_warnings(
fg_lmer
) # dict of indicator dataframes
warning_string_grid = pd.DataFrame(
np.full(fg_lmer._grid.shape, ""),
index=fg_lmer._grid.index.copy(),
columns=fg_lmer._grid.columns.copy(),
)
# collect multiple warnings into single sorted "_" separated strings
# on a tidy time x channel grid
for warning, warning_grid in warning_grids.items():
for idx, row_vals in warning_grid.iterrows():
for jdx, col_val in row_vals.iteritems():
if col_val:
if len(warning_string_grid.loc[idx, jdx]) == 0:
warning_string_grid.loc[idx, jdx] = warning
else:
# split, sort, reassemble
wrns = "_".join(
sorted(
warning_string_grid.loc[idx, jdx].split("_")
+ [warning]
)
)
warning_string_grid.loc[idx, jdx] = wrns
return warning_string_grid
# def _unstringify_lmer_warnings(lmer_summaries):
# """convert stringfied lmer warning grid back into dict of indicator grids as in get_lmer_warnings()"""
# string_warning_grid = lmer_summaries.query("key=='warnings'")
# warnings = []
# for warning in np.unique(string_warning_grid):
# if len(warning) > 0:
# warnings += warning.split("_")
# warning_grids = {}
# for warning in sorted(warnings):
# warning_grids[warning] = string_warning_grid.applymap(
# lambda x: 1 if warning in x else 0
# )
# return warning_grids
def _lm_get_summaries_df(fg_ols, ci_alpha=0.05):
"""scrape fitgrid.LMFitgrid OLS info into a tidy dataframe
Parameters
----------
fg_ols : fitgrid.LMFitGrid
ci_alpha : float {.05}
alpha for confidence interval
Returns
-------
summaries_df : pd.DataFrame
index.names = [`_TIME`, `model`, `beta`, `key`] where
`_TIME` is the `fg_ols.time` and columns are the `fg_ols` columns
Notes
-----
The `summaries_df` row and column indexes are munged to match
fitgrid.lmer._get_summaries_df()
"""
# set time column from the grid, always index.names[0]
_index_names = _update_INDEX_NAMES(fg_ols, INDEX_NAMES)
_time = _index_names[0]
# grab and tidy the formula RHS
rhs = fg_ols.tester.model.formula.split('~')[1].strip()
rhs = re.sub(r"\s+", " ", rhs)
# fitgrid returns them in the last column of the index
param_names = fg_ols.params.index.get_level_values(-1).unique()
# fetch a master copy of the model info
model_vals = []
model_key_attrs = [
("DF", "df_resid"),
("AIC", "aic"),
("logLike", 'llf'),
("SSresid", 'ssr'),
("sigma2", 'mse_resid'),
]
for (key, attr) in model_key_attrs:
vals = None
vals = getattr(fg_ols, attr).copy()
if vals is None:
raise AttributeError(f"model: {rhs} attribute: {attr}")
vals['key'] = key
model_vals.append(vals)
# statsmodels result wrappers have different versions of llf!
aics = (-2 * fg_ols.llf) + 2 * (fg_ols.df_model + fg_ols.k_constant)
if not np.allclose(fg_ols.aic, aics):
msg = (
"uh oh ...statsmodels OLS aic and llf calculations have changed."
" please report an issue to fitgrid"
)
raise ValueError(msg)
# handle warnings
# build model has_warnings with False for ols
has_warnings = pd.DataFrame(
np.zeros(model_vals[0].shape).astype('bool'),
columns=model_vals[0].columns,
index=model_vals[0].index,
)
has_warnings['key'] = 'has_warning'
model_vals.append(has_warnings)
# build empty warning string to match has_warnings == False
warnings = has_warnings.applymap(lambda x: "")
warnings["key"] = "warnings"
model_vals.append(warnings)
model_vals = pd.concat(model_vals)
# constants across the model
model_vals['model'] = rhs
# replicate the model info for each beta
# ... horribly redundant but mighty handy when slicing later
pmvs = []
for p in param_names:
pmv = model_vals.copy()
# pmv['param'] = p
pmv['beta'] = p
pmvs.append(pmv)
pmvs = (
pd.concat(pmvs).reset_index().set_index(_index_names)
) # INDEX_NAMES)
# lookup the param_name specific info for this bundle
summaries = []
# select model point estimates mapped like so (key, OLS_attribute)
sv_attrs = [
('Estimate', 'params'), # coefficient value
('SE', 'bse'),
('P-val', 'pvalues'),
('T-stat', 'tvalues'),
]
for idx, (key, attr) in enumerate(sv_attrs):
attr_vals = getattr(fg_ols, attr).copy() # ! don't mod the _grid
if attr_vals is None:
raise AttributeError(f"not found: {attr}")
attr_vals.index.set_names('beta', level=-1, inplace=True)
attr_vals['model'] = rhs
attr_vals['key'] = key
# update list of beta bundles
summaries.append(
attr_vals.reset_index().set_index(_index_names)
) # INDEX_NAMES))
# special handling for confidence interval
ci_bounds = [
f"{bound:.1f}_ci"
for bound in [100 * (1 + (b * (1 - ci_alpha))) / 2.0 for b in [-1, 1]]
]
cis = fg_ols.conf_int(alpha=ci_alpha)
cis.index = cis.index.rename([_time, 'beta', 'key'])
cis.index = cis.index.set_levels(ci_bounds, 'key')
cis['model'] = rhs
summaries.append(cis.reset_index().set_index(_index_names))
summaries_df = | pd.concat(summaries) | pandas.concat |
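# Hedged illustration (not part of fitgrid itself) of the summary frame layout scraped
# above: rows are keyed by a (time, model, beta, key) MultiIndex where key runs over
# KEY_LABELS, and columns are the modelled channels.
#
#   idx = pd.MultiIndex.from_product(
#       [[0, 4], ["1 + fixed_a"], ["Intercept", "fixed_a"], KEY_LABELS],
#       names=["time", "model", "beta", "key"],
#   )
#   demo = pd.DataFrame(np.nan, index=idx, columns=["MiPf", "MiCe"])
#   demo.loc[(0, "1 + fixed_a", "fixed_a", "Estimate")]  # one row of channel values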
import sys
import os
import yaml
import argparse
import numpy as np
import pandas as pd
import csv
import random
import stat
import glob
import subprocess
from statistics import mean
from pprint import pprint, pformat
import geopandas
from shapely.geometry import Point
from math import sin, cos, atan2, sqrt, pi
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.algorithms.moo.nsga3 import NSGA3
from pymoo.algorithms.moo.moead import MOEAD, ParallelMOEAD
from pymoo.factory import get_sampling, get_crossover, get_mutation, \
get_problem, get_reference_directions
from pymoo.optimize import minimize
from pymoo.visualization.scatter import Scatter
from pymoo.core.problem import Problem
from pymoo.factory import get_performance_indicator
from moo_algs.bce_moead import BCEMOEAD
import time
from datetime import timedelta
work_dir = os.path.dirname(os.path.abspath(__file__))
EXEC_LOG_FILE = None
USE_PJ = False
QCG_MANAGER = None
class dict_to_obj:
def __init__(self, in_dict: dict):
assert isinstance(in_dict, dict)
for key, val in in_dict.items():
if isinstance(val, (list, tuple)):
setattr(self, key, [dict_to_obj(x) if isinstance(
x, dict) else x for x in val])
else:
setattr(self, key, dict_to_obj(val)
if isinstance(val, dict) else val)
def MOO_log(msg):
with open(EXEC_LOG_FILE, "a") as log_file:
print("{}".format(msg), file=log_file)
def read_MOO_setting_yaml():
"""
read MOO setting from yaml file
"""
with open(os.path.join(work_dir, "MOO_setting.yaml")) as f:
MOO_CONFIG = yaml.safe_load(f)
# convert the json to a nested object
# MOO_CONFIG_DICT = dict_to_obj(MOO_CONFIG)
# return MOO_CONFIG_DICT
return MOO_CONFIG
class FLEE_MOO_Problem(Problem):
def __init__(self, execution_mode, simulation_period, cores,
work_dir=work_dir):
# TODO: add input vraibles to MOO_setting.yaml file
super().__init__(n_var=1,
n_obj=5,
xl=np.array([0]), #
xu=np.array([19688])) #
self.work_dir = work_dir
self.cnt_SWEEP_dir = 0
self.execution_mode = execution_mode
self.simulation_period = simulation_period
self.cores = cores
def avg_distance(self, agents_out_files, camp_name):
df_array = [pd.read_csv(filename, index_col=None, header=0)
for filename in agents_out_files]
df = pd.concat(df_array, axis=0, ignore_index=True)
# filter rows for agent location == camp_name
df = df[(df["agent location"] == camp_name) &
(df["distance_moved_this_timestep"] > 0)
]
df.to_csv(os.path.join(
os.path.dirname(agents_out_files[0]), "df_agents.out.csv"),
sep=",",
mode="w",
index=False,
encoding='utf-8'
)
return df["distance_travelled"].mean()
def find_closest_location_to_camp(self, camp_lon, camp_lat):
# in kilometres
R = 6371
p = pi/180
dist = []
locations=[]
# Read lat(Latitude) and lon(Longitude) column in locations.csv file row by row.
locations_path = os.path.join(self.work_dir, "input_csv", "locations.csv")
with open(locations_path, newline='') as csvfile:
reader = csv.reader(csvfile)
next(reader)
# Iterate over each row after the header in the csv
for row in reader:
# row variable is a list that represents a row in csv
# print(row)
if row[2] == 'South_Sudan':
locations.append(row[0])
lat = float(row[3])
lon = float(row[4])
MOO_log(msg="\tlocation ={}".format(row[0]))
MOO_log(msg="\tlongitude ={}".format(lon))
MOO_log(msg="\tlatitude ={}".format(lat))
# calculate the haversine distance between Z and other locations in south sudan, respectively.
phi = (camp_lat-lat) * p
lam = (lon-camp_lon) * p
a = sin(phi/2)*sin(phi/2)+cos(lat*p)*cos(camp_lat*p)*sin(lam/2)*sin(lam/2);
c = 2*atan2(sqrt(a),sqrt(1-a))
dist.append(R * c)
MOO_log(msg="\tall locations ={}".format(locations))
MOO_log(msg="\tdistance between these locations and Z={}".format(dist))
# find the shortest path
min_dist = np.amin(dist)
index_min_dist = dist.index(min_dist)
nearest_loc = locations[index_min_dist]
return nearest_loc, min_dist
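    # Hedged worked example of the haversine computation above (assumed coordinates, not
    # real camp data): two points one degree of latitude apart on the same meridian are
    # roughly 111 km apart.
    #
    #   R, p = 6371, pi / 180
    #   lat1, lon1, lat2, lon2 = 0.0, 30.0, 1.0, 30.0
    #   phi = (lat2 - lat1) * p
    #   lam = (lon2 - lon1) * p
    #   a = sin(phi / 2) ** 2 + cos(lat1 * p) * cos(lat2 * p) * sin(lam / 2) ** 2
    #   print(R * 2 * atan2(sqrt(a), sqrt(1 - a)))   # ~111.19 km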
# --------------------------------------------------------------------------
def change_route_to_camp(self, csv_name):
"""
        Change the location that connects to the camp
"""
MOO_log(msg="\n[change_route_to_camp]")
selectedCamps_csv_PATH = os.path.join(self.work_dir, "input_csv", csv_name)
# Read the data in selectedCamps.csv file row by row.
with open(selectedCamps_csv_PATH, newline='') as csvfile:
reader = csv.reader(csvfile)
next(reader)
# print(header)
# Iterate over each row after the header in the csv
for row in reader:
# row variable is a list that represents a row in csv
# print(row)
lon = float(row[0])
lat = float(row[1])
ipc = float(row[2])
accessibility = float(row[3])
MOO_log(msg="\tcamp lon ={}".format(lon))
MOO_log(msg="\tcamp lat ={}".format(lat))
# 1. Find the nearest location to camp and calculate the distance
# between them.
nearest_loc, min_dist = self.find_closest_location_to_camp(
camp_lon=float(lon), camp_lat=float(lat)
)
# 2. Read routes.csv and modify the data (i.e., the nearest
# location to camp and the distance between them)
routes_csv_PATH = os.path.join(self.work_dir, "input_csv", "routes.csv")
df = pd.read_csv(routes_csv_PATH)
# change one value of a row
df.loc[lambda df: df['name2'] == 'Z', lambda df:'#name1'] = nearest_loc
df.loc[lambda df: df['name2'] == 'Z', lambda df:'distance'] = str(min_dist)
MOO_log(msg="\tLatitude of camp Z: {} \n\t"
"Longitude of camp Z: {}\n\t"
"nearest location: {}\n\t"
"distance to {}:{}".format(
float(lon),
float(lat),
nearest_loc,
nearest_loc, min_dist)
)
# 3. Write the updated route.csv in the moo_ssudan SWEEP
# directory.
sweep_dir = os.path.join(self.work_dir, "SWEEP")
# curr_dir_count = len(os.listdir(sweep_dir))
curr_dir_count = self.cnt_SWEEP_dir
sub_dir_SWEEP = os.path.join(
sweep_dir, "{}".format(curr_dir_count + 1), "input_csv"
)
if os.path.exists(sub_dir_SWEEP):
raise RuntimeError(
"SWEEP dir {} is exists !!!!!".format(sub_dir_SWEEP)
)
os.makedirs(sub_dir_SWEEP)
MOO_log(msg="\tgenerates SWEEP : {}".format(sub_dir_SWEEP))
updated_routes_csv_PATH = os.path.join(sub_dir_SWEEP, "routes.csv")
df.to_csv(updated_routes_csv_PATH, index = False)
# 4. Write campIPC.csv in the moo_ssudan SWEEP directory
campIPC_PATH = os.path.join(sub_dir_SWEEP, "campIPC.csv")
with open(campIPC_PATH, "w", newline="") as fout:
writer = csv.writer(fout, delimiter=",")
writer.writerow(["lon", "lat", "ipc", "accessibility"])
writer.writerow([lon, lat, ipc, accessibility])
self.cnt_SWEEP_dir += 1
MOO_log(msg="\t{}".format("-" * 30))
# --------------------------------------------------------------------------
def flee_optmization(self, run_dir, camp_name):
MOO_log(msg="\n[flee_optmization] called for "
"run_dir = {} camp_name = {}".format(run_dir, camp_name)
)
# calculate camp population, obj#2
df = pd.read_csv(os.path.join(run_dir, "out.csv"))
sim_camp_population_last_day = df["{} sim".format(camp_name)].iloc[-1]
sim_camp_population = df["{} sim".format(camp_name)].tolist()
MOO_log(msg="\tsim camp {} population of the last day = {}".format(
camp_name, sim_camp_population_last_day)
)
MOO_log(msg="\tsim camp {} population = {}".format(
camp_name, sim_camp_population)
)
# find the agents.out files
agents_out_files = glob.glob(
"{}".format(os.path.join(run_dir, "agents.out.*"))
)
# obj#1
avg_distance_travelled = self.avg_distance(
agents_out_files=agents_out_files, camp_name=camp_name
)
MOO_log(
msg="\tInput file : {}"
"\n\t\tavg distance travelled for agents "
"to camp name {} = {}".format(
[os.path.basename(filename) for filename in agents_out_files],
camp_name,
avg_distance_travelled
)
)
# clean agents.out files to reduce the disk space usage
clean_agents_cmd = "rm {}".format(os.path.join(
os.path.dirname(agents_out_files[0]), "agents.out.*"))
subprocess.check_output(
clean_agents_cmd,
shell=True,
)
# calculate camp capacity
PopulationScaledownFactor = 100
df = pd.read_csv(os.path.join(run_dir, "input_csv", "locations.csv"))
camp_population = df[df["#name"] == camp_name]["population"].values[0]
camp_population = camp_population/PopulationScaledownFactor
MOO_log(msg="\tmax camp {} population = {}".format(
camp_name, camp_population)
)
# calculate average remain camp capacity over simulation days, obj#3
remain_camp_capacity = mean(
[abs(camp_population - i) for i in sim_camp_population]
)
MOO_log(msg="\tremain camp {} capacity = {}".format(
camp_name, remain_camp_capacity)
)
# calculate IPC phase, obj#4
input_dir_SWEEP = os.path.join(run_dir, "input_csv")
ipc_df = pd.read_csv(os.path.join(input_dir_SWEEP, "campIPC.csv"))
camp_ipc = float(ipc_df.loc[0,"ipc"])
# calculate accessibility score, obj#5
camp_accessibility = float(ipc_df.loc[0,"accessibility"])
MOO_log(msg="\tcamp {}: IPC phase = {},\taccessibility score = {}".format(
camp_name, camp_ipc, camp_accessibility)
)
# return values [obj#1, obj#2, obj#3, obj#4, obj#5]
return [avg_distance_travelled, sim_camp_population_last_day,
remain_camp_capacity, camp_ipc, camp_accessibility]
#------------------------------------start-----------------------------------
def run_simulation_with_PJ(self, sh_jobs_scripts):
"""
running simulation from SWEEP dir using PJ
"""
from qcg.pilotjob.api.job import Jobs
jobs = Jobs()
for sh_job_scripts in sh_jobs_scripts:
sweep_dir_name = os.path.basename(os.path.dirname(sh_job_scripts))
jobs.add(
name="SWEEP_{}".format(sweep_dir_name),
exec="bash",
args=["-l", sh_job_scripts],
stdout="{}/{}.stdout".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}"
),
stderr="{}/{}.stderr".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}"
),
numCores={"exact": self.cores},
model="default"
)
print("\nAdd job with :")
print("name=SWEEP_{}".format(sweep_dir_name))
print("args = [-l,{}]".format(sh_job_scripts))
print("stdout = {}/{}.stdout".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}")
)
print("stderr = {}/{}.stderr".format(
os.path.dirname(sh_job_scripts),
"${jname}__${uniq}")
)
print("numCores=exact: {}".format(self.cores))
ids = QCG_MANAGER.submit(jobs)
# wait until submited jobs finish
QCG_MANAGER.wait4(ids)
print("\nAll new SWEEP dirs are finished...\n")
def run_simulation_without_PJ(self, sh_jobs_scripts):
"""
running simulation from SWEEP dir without using PJ
"""
for sh_job_scripts in sh_jobs_scripts:
# subprocess.check_output(sh_job_scripts, shell=True)
try:
p = subprocess.Popen(sh_job_scripts, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
except Exception as e:
raise RuntimeError("Unexpected error: {}".format(e))
sys.exit()
acceptable_err_subprocesse_ret_codes = [0]
if p.returncode not in acceptable_err_subprocesse_ret_codes:
raise RuntimeError(
"\njob execution encountered an error (return code {})"
"while executing '{}'".format(p.returncode, command)
)
sys.exit(0)
#-------------------------------------end------------------------------------
def _evaluate(self, x, out, *args, **kwargs):
"""
1. The _evaluate method takes a one-dimensional NumPy array X with n rows as an input.
The row represents an individual, namely, the index of a possible camp location.
After doing the necessary calculations, the objective values must be
added to the dictionary, out, with the key F.
"""
# ---------------------------------start--------------------------------
# read accessible_camp_ipc.csv
df = | pd.read_csv("accessible_camp_ipc.csv") | pandas.read_csv |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to datframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
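# `.at` is label-based scalar access; the positional-looking keys 0 and 1 work
# here only because the test data is assumed to use a default RangeIndex, so the
# labels coincide with the positions.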
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but it is created anyway so there won't be confusing
# list comprehension stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
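# copy(False) is a shallow copy, so the assignment to modin_df above is expected
# to be visible through modin_df_cp as well, which is why df_equals still passes.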
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
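# For reference (an illustrative sketch, not part of the parametrized data):
#   pandas.get_dummies(pandas.DataFrame({"c": ["a", "b", "a"]}))
# one-hot encodes the object column into indicator columns c_a and c_b;
# dummy_na=True adds an indicator column for missing values, and drop_first
# drops the first category's column.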
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This gets repeated, but it is easier than using a list in the parametrize decorator.
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This gets repeated, but it is easier than using a list in the parametrize decorator.
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
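# The tuples (i // 4, i // 2, i) build a three-level MultiIndex, so `level` is
# exercised both by position (0, 1, 2) and, when axis_names is provided, by the
# names "a", "b" and "c".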
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
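# With verify_integrity=True, appending frames whose indexes overlap (as
# [modin_df, modin_df] does here) is expected to raise in pandas, so this loop
# also exercises the exception-parity branch above.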
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
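# Because Python dict literals keep only the last value for a repeated key,
# bad_dtype_dict evaluates to {"B": str}, so both frames end up casting just
# column B; the noqa comment suppresses the duplicate-key lint warning.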
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
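# A list-like bound without an explicit axis is expected to raise ValueError,
# presumably because the bound cannot be aligned with rows or columns without
# knowing the axis.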
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from the backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas, and there is no true guarantee which value will come first.
# If they don't match, at least make sure that the `freq` is the same.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
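# After transposing, the original column labels become the index, so dropping
# index=["col3", "col1"] removes what were columns in the untransposed frame
# (assuming the parametrized data actually contains those column names).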
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but it is created so there won't be confusing
# list comprehension stuff in the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
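# dot requires the other operand's index to align with this frame's columns,
# which is why the unaligned Series above is expected to raise ValueError; the
# DataFrame-by-DataFrame product is only checked for a UserWarning here (the
# defaults-to-pandas path).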
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
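# The pandas frame was just filled in place, so the not-yet-filled Modin frame must
# differ; df_equals is expected to raise AssertionError here.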
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
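# Reindexing the first two rows over the full index leaves the remaining rows as NaN;
# pad/backfill with limit=2 should fill at most two of them per column.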
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
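# Length-mismatched values, re-inserting an existing column name, and an out-of-range
# loc should all raise.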
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
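# Build a two-level ('first', 'second') MultiIndex so partial-label lookups such as
# .loc['bar'] can be compared against pandas.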
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
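# An unnamed Series cannot be merged; once it is named 'col1' it should behave like
# merging the one-column frame modin_df2[['col1']].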
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
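# Helpers chained through pipe: h drops one column, g appends the frame to itself
# arg1 times, f drops the rows labelled arg2 and arg3.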
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
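# Regression test: a column added via eval, then filtered with query, reset and
# renamed, should still match the equivalent pandas pipeline.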
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename used to set ref_locs, and set_index was not resetting them
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
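# With a MultiIndex, rename_axis should be able to rename individual level names,
# e.g. mapping 'type' to 'class'.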
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
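# Invalid sampling arguments should raise: n together with frac, an unknown weights
# column, string weights along axis=1, weight lists of the wrong length, and a negative n.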
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights= | pandas.Series() | pandas.Series |
from datetime import datetime
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
msg = "stop passing 'keep_tz'"
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
assert msg in str(m[0].message)
# convert to utc
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {
"self",
"mapper",
"index",
"columns",
"axis",
"inplace",
"copy",
"level",
"errors",
}
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df["B"] = s
df["C"] = np.array(s)
df["D"] = s.values
df["E"] = np.array(s.values)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
        assert is_interval_dtype(df["D"].cat.categories)
#!/usr/bin/env python
import PySimpleGUI as sg
import sys
import os
import csv
import pandas as pd
import re
# TODOS:
# Add props at run time
# sg.theme('Dark Red')
sg.theme('Dark Blue 3')
# print = sg.Print
bigfont = ("Arial", 16)
rule = ("Arial", 10)
def pad(s, l=12):
return s.ljust(l, " ")
def getlines(filename):
with open(filename, "r") as f:
return [s.strip() for s in f.readlines()]
def choose_main():
layout = [[sg.Text('Params file')],
[sg.Input(key='-FILE-', visible=False, enable_events=True), sg.FileBrowse()]]
event, values = sg.Window('File Compare', layout).read(close=True)
mainwin(values['-FILE-'])
# print(f'You chose: {values["-FILE-"]}')
def generate_scripts(values):
grid = {}
template = None
experiments = None
output_dir = None
r = re.compile(r'\[([^\]]*)\]')
for k, v in values.items():
if "-PROP-" in str(k):
k = k[6:]
if values['-CHECK-' + k]:
try:
min_, max_, step = [int(x) for x in [values[a + k] for a in ['-MIN-', '-MAX-', '-STEP-']]]
max_ = max_ + 1
except:
raise ValueError("Invalid values for min/max/step")
grid[k] = range(min_, max_, step)
else:
match = re.search(r, v)
if match:
if not os.path.exists(match.group(1)):
raise ValueError(f"File {match.group(1)} specified as value not found.")
grid[k] = getlines(match.group(1))
else:
grid[k] = v.split(",")
elif k == "-OUTPUT-":
output_dir = v
elif k == "-TEMPLATE-":
template = v
elif k == "-EXPERIMENT-":
experiments = v
keys = list(grid.keys())
results = _iterate([], keys, grid, {})
if not os.path.exists(template):
raise ValueError(f"File {template} not found.")
exp_idx = 1
if not os.path.exists(experiments):
create = sg.popup_ok_cancel('Experiments file not found. Create new?')
if create != "OK":
raise ValueError(f"Experiments file {experiments} not found.")
else:
exp_df = | pd.DataFrame(columns=["id"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : <EMAIL>
# @Date : 2021/01/20 14:51
"""
对pycocotools的一些功能整合、封装
代码中,gt指ground truth,真实标注
dt指detection,模型检测出来的结果
除了 label.py 中定义的
CocoGtData 专门处理 gt 格式数据
CocoData 同时处理 gt dt 格式数据
这里对外有两个类
CocoEval 计算coco指标
CocoMatch 进行一些高级的结果分析
生成的结果可以用 xllabelme 打开 (pip install xllabelme)
"""
from pyxllib.prog.pupil import check_install_package
check_install_package('xlcocotools')
from collections import ChainMap, defaultdict, Counter
import copy
import json
import os
import pathlib
import random
import sys
import pandas as pd
import PIL
from tqdm import tqdm
from pyxllib.file.packlib.zipfile import ZipFile
from pyxllib.prog.newbie import round_int
from pyxllib.prog.pupil import DictTool
from pyxllib.prog.specialist import mtqdm
from pyxllib.algo.pupil import Groups, make_index_function, matchpairs
from pyxllib.algo.geo import rect_bounds, rect2polygon, reshape_coords, ltrb2xywh, xywh2ltrb, ComputeIou
from pyxllib.algo.stat import dataframes_to_excel
from pyxllib.file.specialist import File, Dir, PathGroups
from pyxllib.debug.specialist import get_xllog
from pyxlpr.data.icdar import IcdarEval
from pyxlpr.data.labelme import LABEL_COLORMAP7, ToLabelmeJson, LabelmeDataset, LabelmeDict
from xlcocotools.coco import COCO
from xlcocotools.cocoeval import COCOeval
class CocoGtData:
""" 类coco格式的json数据处理
不一定要跟coco gt结构完全相同,只要相似就行,
比如images、annotaions、categories都可以扩展自定义字段
"""
def __init__(self, gt):
self.gt_dict = gt if isinstance(gt, dict) else File(gt).read()
@classmethod
def gen_image(cls, image_id, file_name, height=None, width=None, **kwargs):
""" 初始化一个图片标注,使用位置参数,复用的时候可以节省代码量 """
# 没输入height、width时会自动从file_name读取计算
# 但千万注意,这里coco的file_name输入的是相对路径,并不一定在工作目录下能work,一般还是推荐自己输入height、width
if height is None or width is None:
width, height = PIL.Image.open(str(file_name)).size
im = {'id': int(image_id), 'file_name': file_name,
'height': int(height), 'width': int(width)}
if kwargs:
im.update(kwargs)
return im
@classmethod
def gen_images(cls, imdir, start_idx=1):
""" 自动生成标准的images字段
:param imdir: 图片目录
:param start_idx: 图片起始下标
:return: list[dict(id, file_name, width, height)]
"""
files = Dir(imdir).select_files(['*.jpg', '*.png'])
images = []
for i, f in enumerate(files, start=start_idx):
            w, h = PIL.Image.open(str(f)).size
images.append({'id': i, 'file_name': f.name, 'width': w, 'height': h})
return images
@classmethod
def points2segmentation(cls, pts):
""" labelme的points结构转segmentation分割结构
"""
# 1 两个点要转4个点
if len(pts) == 2:
pts = rect2polygon(pts)
else:
pts = list(pts)
# 2 点集要封闭,末尾要加上第0个点
pts.append(pts[0])
# 多边形因为要画出所有的点,还要封闭,数据有点多,还是只存整数节省空间
pts = [round_int(v) for v in reshape_coords(pts, 1)]
return pts
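    # points2segmentation example (illustrative; the exact corner order comes from
    # rect2polygon): two corner points such as [[0, 0], [10, 5]] are expanded to
    # the four rectangle corners, the polygon is closed by repeating the first
    # corner, and the result is flattened into a single list of ten rounded
    # integers suitable for a coco 'segmentation' entry.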
@classmethod
def gen_annotation(cls, **kwargs):
""" 智能地生成一个annotation字典
这个略微有点过度封装了
但没事,先放着,可以不拿出来用~~
:param points: 必须是n*2的结构
"""
a = kwargs.copy()
# a = {'id': 0, 'area': 0, 'bbox': [0, 0, 0, 0],
# 'category_id': 1, 'image_id': 0, 'iscrowd': 0, 'segmentation': []}
if 'points' in a: # points是一个特殊参数,使用“一个”多边形来标注(注意区别segmentation是多个多边形)
if 'segmentation' not in a:
a['segmentation'] = [cls.points2segmentation(a['points'])]
del a['points']
if 'bbox' not in a:
pts = []
for seg in a['segmentation']:
pts += seg
a['bbox'] = ltrb2xywh(rect_bounds(pts))
if 'area' not in a: # 自动计算面积
a['area'] = int(a['bbox'][2] * a['bbox'][3])
for k in ['id', 'image_id']:
if k not in a:
a[k] = 0
if 'category_id' not in a:
a['category_id'] = 1
if 'iscrowd' not in a:
a['iscrowd'] = 0
return a
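    # gen_annotation usage sketch (numbers are illustrative, not from any dataset):
    #   an = CocoGtData.gen_annotation(points=[[10, 20], [110, 80]],
    #                                  image_id=3, id=7, category_id=1)
    #   # -> 'segmentation' is filled from the rectangle, bbox=[10, 20, 100, 60],
    #   #    area=6000, iscrowd=0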
@classmethod
def gen_quad_annotations(cls, file, *, image_id, start_box_id, category_id=1, **kwargs):
""" 解析一张图片对应的txt标注文件
:param file: 标注文件,有多行标注
每行是x1,y1,x2,y2,x3,y3,x4,y4[,label] (label可以不存在)
:param image_id: 该图片id
:param start_box_id: box_id起始编号
:param category_id: 归属类别
"""
lines = File(file).read()
box_id = start_box_id
annotations = []
for line in lines.splitlines():
vals = line.split(',', maxsplit=8)
if len(vals) < 2: continue
attrs = {'id': box_id, 'image_id': image_id, 'category_id': category_id}
if len(vals) == 9:
attrs['label'] = vals[8]
# print(vals)
seg = [int(v) for v in vals[:8]]
attrs['segmentation'] = [seg]
attrs['bbox'] = ltrb2xywh(rect_bounds(seg))
if kwargs:
attrs.update(kwargs)
annotations.append(cls.gen_annotation(**attrs))
box_id += 1
return annotations
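    # gen_quad_annotations input sketch: each line of the txt file looks like
    # (coordinates and label below are made up)
    #   10,20,110,20,110,80,10,80,text1
    #   200,40,260,40,260,90,200,90
    # and becomes one annotation whose segmentation is the quad and whose bbox
    # is the quad's bounding rectangle.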
@classmethod
def gen_categories(cls, cats):
if isinstance(cats, list):
# 如果输入是一个类别列表清单,则按1、2、3的顺序给其编号
return [{'id': i, 'name': x, 'supercategory': ''} for i, x in enumerate(cats, start=1)]
else:
raise TypeError
# TODO 扩展支持其他构造方法
@classmethod
def gen_gt_dict(cls, images, annotations, categories, outfile=None):
data = {'images': images, 'annotations': annotations, 'categories': categories}
if outfile is not None:
File(outfile).write(data)
return data
@classmethod
def is_gt_dict(cls, gt_dict):
if isinstance(gt_dict, (tuple, list)):
return False
has_keys = set('images annotations categories'.split())
return not (has_keys - gt_dict.keys())
def clear_gt_segmentation(self, *, inplace=False):
""" 有的coco json文件太大,如果只做普通的bbox检测任务,可以把segmentation的值删掉
"""
gt_dict = self.gt_dict if inplace else copy.deepcopy(self.gt_dict)
for an in gt_dict['annotations']:
an['segmentation'] = []
return gt_dict
def get_catname_func(self):
id2name = {x['id']: x['name'] for x in self.gt_dict['categories']}
def warpper(cat_id, default=...):
"""
:param cat_id:
:param default: 没匹配到的默认值
... 不是默认值,而是代表匹配不到直接报错
:return:
"""
if cat_id in id2name:
return id2name[cat_id]
else:
if default is ...:
raise IndexError(f'{cat_id}')
else:
return default
return warpper
def _group_base(self, group_anns, reserve_empty=False):
if reserve_empty:
for im in self.gt_dict['images']:
yield im, group_anns.get(im['id'], [])
else:
id2im = {im['id']: im for im in self.gt_dict['images']}
for k, v in group_anns.items():
yield id2im[k], v
def group_gt(self, *, reserve_empty=False):
""" 遍历gt的每一张图片的标注
这个是用字典的方式来实现分组,没用 df.groupby 的功能
:param reserve_empty: 是否保留空im对应的结果
:return: [(im, annos), ...] 每一组是im标注和对应的一组annos标注
"""
group_anns = defaultdict(list)
[group_anns[an['image_id']].append(an) for an in self.gt_dict['annotations']]
return self._group_base(group_anns, reserve_empty)
def select_gt(self, ids, *, inplace=False):
""" 删除一些images标注(会删除对应的annotations),挑选数据,或者减小json大小
:param ids: int类型表示保留的图片id,str类型表示保留的图片名,可以混合使用
[341427, 'PMC4055390_00006.jpg', ...]
:return: 筛选出的新字典
"""
gt_dict = self.gt_dict
# 1 ids 统一为int类型的id值
if not isinstance(ids, (list, tuple, set)):
ids = [ids]
map_name2id = {item['file_name']: item['id'] for item in gt_dict['images']}
ids = set([(map_name2id[x] if isinstance(x, str) else x) for x in ids])
# 2 简化images和annotations
dst = {'images': [x for x in gt_dict['images'] if (x['id'] in ids)],
'annotations': [x for x in gt_dict['annotations'] if (x['image_id'] in ids)],
'categories': gt_dict['categories']}
if inplace: self.gt_dict = dst
return dst
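    # select_gt usage sketch (ids are illustrative; 'gt.json' is a placeholder path):
    #   sub = CocoGtData('gt.json').select_gt([341427, 'PMC4055390_00006.jpg'])
    #   # -> a new gt dict keeping only those images and their annotations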
def random_select_gt(self, number=20, *, inplace=False):
""" 从gt中随机抽出number个数据 """
ids = [x['id'] for x in self.gt_dict['images']]
random.shuffle(ids)
gt_dict = self.select_gt(ids[:number])
if inplace: self.gt_dict = gt_dict
return gt_dict
def select_gt_by_imdir(self, imdir, *, inplace=False):
""" 基于imdir目录下的图片来过滤src_json """
# 1 对比下差异
json_images = set([x['file_name'] for x in self.gt_dict['images']])
dir_images = set(os.listdir(str(imdir)))
# df = SetCmper({'json_images': json_images, 'dir_images': dir_images}).intersection()
# print('json_images intersection dir_images:')
# print(df)
# 2 精简json
gt_dict = self.select_gt(json_images & dir_images)
if inplace: self.gt_dict = gt_dict
return gt_dict
def reset_image_id(self, start=1, *, inplace=False):
""" 按images顺序对图片重编号 """
gt_dict = self.gt_dict if inplace else copy.deepcopy(self.gt_dict)
# 1 重置 images 的 id
old2new = {}
for i, im in enumerate(gt_dict['images'], start=start):
old2new[im['id']] = i
im['id'] = i
# 2 重置 annotations 的 id
for anno in gt_dict['annotations']:
anno['image_id'] = old2new[anno['image_id']]
return gt_dict
def reset_box_id(self, start=1, *, inplace=False):
anns = self.gt_dict['annotations']
if not inplace:
anns = copy.deepcopy(anns)
for i, anno in enumerate(anns, start=start):
anno['id'] = i
return anns
def to_labelme_cls(self, root, *, bbox=True, seg=False, info=False):
"""
:param root: 图片根目录
:return:
extdata,存储了一些匹配异常信息
"""
root, data = Dir(root), {}
catid2name = {x['id']: x['name'] for x in self.gt_dict['categories']}
# 1 准备工作,构建文件名索引字典
gs = PathGroups.groupby(root.select_files('**/*'))
# 2 遍历生成labelme数据
not_finds = set() # coco里有的图片,root里没有找到
multimatch = dict() # coco里的某张图片,在root找到多个匹配文件
for img, anns in tqdm(self.group_gt(reserve_empty=True), disable=not info):
# 2.1 文件匹配
imfiles = gs.find_files(img['file_name'])
if not imfiles: # 没有匹配图片的,不处理
not_finds.add(img['file_name'])
continue
elif len(imfiles) > 1:
multimatch[img['file_name']] = imfiles
imfile = imfiles[0]
else:
imfile = imfiles[0]
# 2.2 数据内容转换
lmdict = LabelmeDict.gen_data(imfile)
img = DictTool.or_(img, {'xltype': 'image'})
lmdict['shapes'].append(LabelmeDict.gen_shape(json.dumps(img, ensure_ascii=False), [[-10, 0], [-5, 0]]))
for ann in anns:
if bbox:
ann = DictTool.or_(ann, {'category_name': catid2name[ann['category_id']]})
label = json.dumps(ann, ensure_ascii=False)
shape = LabelmeDict.gen_shape(label, xywh2ltrb(ann['bbox']))
lmdict['shapes'].append(shape)
if seg:
# 把分割也显示出来(用灰色)
for x in ann['segmentation']:
an = {'box_id': ann['id'], 'xltype': 'seg', 'shape_color': [191, 191, 191]}
label = json.dumps(an, ensure_ascii=False)
lmdict['shapes'].append(LabelmeDict.gen_shape(label, x))
f = imfile.with_suffix('.json')
data[f.relpath(root)] = lmdict
return LabelmeDataset(root, data,
extdata={'categories': self.gt_dict['categories'],
'not_finds': not_finds,
'multimatch': Groups(multimatch)})
def to_labelme(self, root, *, bbox=True, seg=False, info=False):
self.to_labelme_cls(root, bbox=bbox, seg=seg, info=info).writes()
def split_data(self, parts, *, shuffle=True):
""" 数据拆分器
:param dict parts: 每个部分要拆分、写入的文件名,以及数据比例
py≥3.6的版本中,dict的key是有序的,会按顺序处理开发者输入的清单
这里比例求和可以不满1,但不能超过1
:param bool shuffle: 是否打乱原有images顺序
:return: 同parts的字典,但值变成了拆分后的coco数据
"""
# 1 读入data
assert sum(parts.values()) <= 1, '比例和不能超过1'
data = self.gt_dict
if shuffle:
data = data.copy()
data['images'] = data['images'].copy()
random.shuffle(data['images'])
# 2 生成每一个部分的文件
def select_annotations(annotations, image_ids):
# 简单的for循环和if操作,可以用“列表推导式”写
return [an for an in annotations if (an['image_id'] in image_ids)]
res = {}
total_num, used_rate = len(data['images']), 0
for k, v in parts.items():
# 2.1 选择子集图片
images = data['images'][int(used_rate * total_num):int((used_rate + v) * total_num)]
image_ids = {im['id'] for im in images}
# 2.2 生成新的字典
res[k] = {'images': images,
'annotations': select_annotations(data['annotations'], image_ids),
'categories': data['categories']}
# 2.4 更新使用率
used_rate += v
return res
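    # split_data usage sketch (keys and ratios are illustrative):
    #   parts = CocoGtData('instances.json').split_data({'train': 0.8, 'val': 0.2})
    #   # parts['train'] / parts['val'] are coco-style dicts sharing the same 'categories'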
class CocoData(CocoGtData):
""" 这个类可以封装一些需要gt和dt衔接的功能 """
def __init__(self, gt, dt=None, *, min_score=0):
"""
:param gt: gt的dict或文件
gt是必须传入的,可以只传入gt
有些任务理论上可以只有dt,但把配套的gt传入,能做更多事
:param dt: dt的list或文件
:param min_score: CocoMatch这个系列的类,初始化增加min_score参数,支持直接滤除dt低置信度的框
"""
super().__init__(gt)
def get_dt_list(dt, min_score=0):
# dt
default_dt = []
# default_dt = [{'image_id': self.gt_dict['images'][0]['id'],
# 'category_id': self.gt_dict['categories'][0]['id'],
# 'bbox': [0, 0, 1, 1],
# 'score': 1}]
# 这样直接填id有很大的风险,可能会报错。但是要正确填就需要gt的信息,传参麻烦~~
# default_dt = [{'image_id': 1, 'category_id': 1, 'bbox': [0, 0, 1, 1], 'score': 1}]
if not dt:
dt_list = default_dt
else:
dt_list = dt if isinstance(dt, (list, tuple)) else File(dt).read()
if min_score:
dt_list = [b for b in dt_list if (b['score'] >= min_score)]
if not dt_list:
dt_list = default_dt
return dt_list
self.dt_list = get_dt_list(dt, min_score)
@classmethod
def is_dt_list(cls, dt_list):
if not isinstance(dt_list, (tuple, list)):
return False
item = dt_list[0]
has_keys = set('score image_id category_id bbox'.split())
return not (has_keys - item.keys())
def select_dt(self, ids, *, inplace=False):
gt_dict, dt_list = self.gt_dict, self.dt_list
# 1 ids 统一为int类型的id值
if not isinstance(ids, (list, tuple, set)):
ids = [ids]
if gt_dict:
map_name2id = {item['file_name']: item['id'] for item in gt_dict['images']}
ids = [(map_name2id[x] if isinstance(x, str) else x) for x in ids]
ids = set(ids)
# 2 简化images
dst = [x for x in dt_list if (x['image_id'] in ids)]
if inplace: self.dt_list = dst
return dst
def group_dt(self, *, reserve_empty=False):
""" 对annos按image_id分组,返回 [(im1, dt_anns1), (im2, dt_anns2), ...] """
group_anns = defaultdict(list)
[group_anns[an['image_id']].append(an) for an in self.dt_list]
return self._group_base(group_anns, reserve_empty)
def group_gt_dt(self, *, reserve_empty=False):
""" 获得一张图片上gt和dt的标注结果
[(im, gt_anns, dt_anns), ...]
"""
raise NotImplementedError
def to_icdar_label_quad(self, outfile, *, min_score=0):
""" 将coco的dt结果转为icdar的标注格式
存成一个zip文件,zip里面每张图对应一个txt标注文件
每个txt文件用quad八个数值代表一个标注框
适用于 sroie 检测格式
"""
# 1 获取dt_list
if min_score:
dt_list = [b for b in self.dt_list if (b['score'] >= min_score)]
else:
dt_list = self.dt_list
# 2 转df,按图片分组处理
df = pd.DataFrame.from_dict(dt_list) # noqa from_dict可以传入List[Dict]
df = df.groupby('image_id')
# 3 建立一个zip文件
myzip = ZipFile(str(outfile), 'w')
# 4 遍历每一组数据,生成一个文件放到zip里面
id2name = {im['id']: pathlib.Path(im['file_name']).stem for im in self.gt_dict['images']}
for image_id, items in df:
label_file = id2name[image_id] + '.txt'
quads = [rect2polygon(xywh2ltrb(x), dtype=int).reshape(-1) for x in items['bbox']]
quads = [','.join(map(str, x)) for x in quads]
myzip.writestr(label_file, '\n'.join(quads))
myzip.close()
class Coco2Labelme(ToLabelmeJson):
""" coco格式的可视化
TODO segmentation 分割 效果的可视化
"""
def add_segmentation(self, row):
""" 分割默认先都用灰色标注 """
r = dict()
r['gt_box_id'] = row['gt_box_id']
r['label'] = 'seg'
r['points'] = row['gt_ltrb']
r['shape_color'] = [191, 191, 191]
# 5 保存
self.add_shape2(**r)
# def _sort_anns(self, anns):
# if anns and 'score' in anns[0]:
# anns = sorted(anns, key=lambda x: -x['score']) # 权重从大到小排序
# return anns
def add_gt_shape(self, row, attrs=None):
"""
:param row: df的一行数据series
:param attrs: 其他扩展字段值
"""
# 1 基本字段
r = dict()
for name in ['gt_box_id', 'gt_category_id', 'gt_area']:
r[name] = row[name]
r['gt_ltrb'] = ','.join(map(str, row['gt_ltrb']))
# 2 主要字段
r['label'] = row['gt_category_name'] # 这个需要上层的anns_match2, labelme_match传入的df实现提供这个字段
r['points'] = row['gt_ltrb']
if row['gt_supercategory'] != '':
r['group_id'] = row['gt_supercategory']
# 3 row中其他自定义字段
# 这些是已经处理过的标准字段,进入黑名单,不显示;其他字段默认白名单都显示
std_an_keys = set('gt_box_id gt_category_id gt_ltrb gt_area iscrowd file_name '
'gt_category_name gt_supercategory gt_segmentation dt_segmentation'.split())
# 如果跟labelme的标准字段重名了,需要区分下:比如 label
std_lm_keys = set('label points group_id shape_type flags'.split()) # labelme的标准字段
ks = set(row.index) - std_an_keys
for k in ks:
if k in std_lm_keys:
r['_' + k] = row[k]
else:
r[k] = row[k]
if 'dt_ltrb' in r:
r['dt_ltrb'] = ','.join(map(str, r['dt_ltrb']))
# 4 精简字段:聚合以dt、gt为前缀的所有字段
group_keys = defaultdict(list)
res = dict()
for k, v in r.items():
for part in ('dt', 'gt'):
if k.startswith(part + '_'):
group_keys[part].append(k)
break
else:
res[k] = v
# 聚合后的属性排序准则
order = ['category_id', 'category_name', 'score', 'ltrb', 'area', 'box_id']
idxfunc = make_index_function(order)
for part in ('dt', 'gt'):
keys = group_keys[part]
m = len(part) + 1
keys.sort(key=lambda k: idxfunc(k[m:]))
res[part] = '/'.join([str(r[k]) for k in keys]) # 数值拼接
res['~' + part] = '/'.join([str(k[m:]) for k in keys]) # 解释key,如果很熟悉了可以选择关闭
# 5 扩展字段
if attrs:
res.update(attrs)
# 6 保存
self.add_shape2(**res)
def add_dt_shape(self, row, attrs=None):
# 1 基本字段
r = dict()
for name in ['iou', 'dt_category_id', 'dt_score']:
r[name] = row[name]
r['dt_ltrb'] = ','.join(map(str, row['dt_ltrb']))
# 2 主要字段
r['label'] = row['dt_category_name']
if 'dt_segmentation' in row:
r['points'] = row['dt_segmentation'][0]
else:
r['points'] = row['dt_ltrb']
# 3 扩展字段
if attrs:
r.update(attrs)
# 4 保存
self.add_shape2(**r)
def _anns_init(self, df, segmentation=False):
df = df.copy()
df.drop(['image_id'], axis=1, inplace=True)
columns = df.columns
if segmentation:
pass
else:
if 'gt_segmentation' in columns:
df.drop('gt_segmentation', axis=1, inplace=True)
if 'dt_segmentation' in columns:
df.drop('dt_segmentation', axis=1, inplace=True)
return df
def anns_gt(self, df, *, segmentation=False, shape_attrs=None):
""" Coco2Df.gt的可视化
:param df: Coco2Df生成的df后,输入特定的某一组image_id、file_name
:param segmentation: 是否显示segmentation分割效果
:param shape_attrs: 人工额外强制设置的字段值
"""
df = self._anns_init(df, segmentation)
for idx, row in df.iterrows():
if segmentation:
self.add_segmentation(row)
self.add_gt_shape(row, shape_attrs)
def anns_match(self, df, *, hide_match_dt=False, segmentation=False, shape_attrs=None):
""" Coco2Df.match的可视化
正确的gt用绿框,位置匹配到但类别错误的用黄框,绿黄根据iou设置颜色深浅,此时dt统一用灰色框
漏检的gt用红框,多余的dt用蓝框
:param hide_match_dt: 不显示灰色的dt框
TODO 研究labelme shape的flags参数含义,支持shape的过滤显示?
"""
df = self._anns_init(df, segmentation)
if not shape_attrs:
shape_attrs = {}
def get_attrs(d):
return dict(ChainMap(shape_attrs, d))
for idx, row in df.iterrows():
r = row
if r['gt_category_id'] == -1: # 多余的dt
self.add_dt_shape(r, get_attrs({'shape_color': [0, 0, 255]}))
elif r['dt_category_id'] == -1: # 没有被匹配到的gt
self.add_gt_shape(r, get_attrs({'shape_color': [255, 0, 0]}))
else: # 匹配到的gt和dt
if not hide_match_dt:
self.add_dt_shape(r, get_attrs({'shape_color': [191, 191, 191]}))
color_value = int(255 * r['iou'])
if r['gt_category_id'] == r['dt_category_id']:
self.add_gt_shape(r, get_attrs({'shape_color': [0, color_value, 0]}))
else:
self.add_gt_shape(r, get_attrs({'shape_color': [color_value, color_value, 0]}))
def anns_match2(self, df, *, hide_match_dt=False, segmentation=False, shape_attrs=None, colormap=None):
""" 按类别区分框颜色
"""
import imgviz
df = self._anns_init(df, segmentation)
if not shape_attrs:
shape_attrs = {}
def get_attrs(d):
return dict(ChainMap(shape_attrs, d))
if not colormap:
colormap = imgviz.label_colormap(value=200)
m = len(colormap)
for idx, row in df.iterrows():
r = row
attrs = {'shape_color': colormap[r['gt_category_id'] % m],
'vertex_fill_color': colormap[r['dt_category_id'] % m]}
if r['gt_category_id'] == -1: # 多余的dt
self.add_dt_shape(r, get_attrs(attrs))
elif r['dt_category_id'] == -1: # 没有被匹配到的gt
self.add_gt_shape(r, get_attrs(attrs))
else: # 匹配到的gt和dt
if not hide_match_dt:
self.add_dt_shape(r, get_attrs({'shape_color': [191, 191, 191]}))
attrs['vertex_fill_color'] = [int(r['iou'] * v) for v in attrs['vertex_fill_color']]
self.add_gt_shape(r, get_attrs(attrs))
class CocoEval(CocoData):
def __init__(self, gt, dt, iou_type='bbox', *, min_score=0, print_mode=False):
"""
TODO coco_gt、coco_dt本来已存储了很多标注信息,有些冗余了,是否可以跟gt_dict、dt_list等整合,去掉些没必要的组件?
"""
super().__init__(gt, dt, min_score=min_score)
# type
self.iou_type = iou_type
# evaluater
self.coco_gt = COCO(gt, print_mode=print_mode) # 这不需要按图片、类型分类处理
self.coco_dt, self.evaluater = None, None
if self.dt_list:
self.coco_dt = self.coco_gt.loadRes(self.dt_list) # 这个返回也是coco对象
self.evaluater = COCOeval(self.coco_gt, self.coco_dt, iou_type, print_mode=print_mode)
@classmethod
def evaluater_eval(cls, et, img_ids=None, *, print_mode=False):
""" coco官方目标检测测评方法
https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
:param img_ids:
:param print_mode: 注意这里的print_mode不同于初始化的print_mode,指的是不同的东西
:return:
"""
# 1 coco是有方法支持过滤id,只计算部分图的分值结果
# 没有输入img_ids,也要显式重置为全部数据
if not img_ids:
img_ids = et.cocoGt.imgIds.values()
et.params.imgIds = list(img_ids)
# 2 每张图片、每个类别的iou等核心数据的计算
et.evaluate()
# 在不同参数测评指标下的分数
et.accumulate()
# 3 显示结果
if print_mode: # 如果要显示结果则使用标准计算策略
et.summarize(print_mode=print_mode)
return round(et.stats[0], 4)
else: # 否则简化计算过程
return round(et.step_summarize(), 4)
def eval(self, img_ids=None, *, print_mode=False):
return self.evaluater_eval(self.evaluater, img_ids=img_ids, print_mode=print_mode)
def eval_dt_score(self, step=0.1):
""" 计算按一定阈值滤除框后,对coco指标产生的影响 """
dt_list = copy.copy(self.dt_list)
i = 0
records = []
columns = ['≥dt_score', 'n_dt_box', 'coco_score']
while i < 1:
dt_list = [x for x in dt_list if x['score'] >= i]
if not dt_list: break
coco_dt = self.coco_gt.loadRes(dt_list)
evaluater = COCOeval(self.coco_gt, coco_dt, self.iou_type)
records.append([i, len(dt_list), self.evaluater_eval(evaluater)])
i += step
df = pd.DataFrame.from_records(records, columns=columns)
return df
def parse_dt_score(self, step=0.1, *, print_mode=False):
""" dt按不同score过滤后效果
注意如果数据集很大,这个功能运算特别慢,目前测试仅20张图都要10秒
可以把print_mode=True打开观察中间结果
注意这个方法,需要调用后面的 CocoMatch
"""
gt_dict, dt_list = self.gt_dict, self.dt_list
i = 0
records = []
columns = ['≥dt_score', 'n_dt_box', 'n_match_box', 'n_matchcat_box',
'coco_score',
'icdar2013', 'ic13_precision', 'ic13_recall',
'f1_score']
if print_mode: print(columns)
while i < 1:
dt_list = [x for x in dt_list if x['score'] >= i]
if not dt_list: break
cm = CocoMatch(gt_dict, dt_list, eval_im=False)
ie = IcdarEval(*cm.to_icdareval_data())
ic13 = ie.icdar2013()
row = [i, cm.n_dt_box(), cm.n_match_box(), cm.n_matchcat_box(),
cm.eval(), ic13['hmean'], ic13['precision'], ic13['recall'], cm.f1_score()]
if print_mode: print(row)
records.append(row)
i += step
df = pd.DataFrame.from_records(records, columns=columns)
if print_mode:
with pd.option_context('display.max_colwidth', -1, 'display.max_columns', 20,
'display.width', 200): # 上下文控制格式
print(df)
return df
class CocoParser(CocoEval):
def __init__(self, gt, dt=None, iou_type='bbox', *, min_score=0, print_mode=False):
""" coco格式相关分析工具,dt不输入也行,当做没有任何识别结果处理~~
相比CocoMatch比较轻量级,不会初始化太久,但提供了一些常用的基础功能
"""
super().__init__(gt, dt, iou_type, min_score=min_score, print_mode=print_mode)
# gt里的images、categories数据,已转成df表格格式
self.images, self.categories = self._get_images_df(), self._get_categories_df()
# gt、dt的统计表
self.gt_anns, self.dt_anns = self._get_gt_anns_df(), self._get_dt_anns_df()
@classmethod
def bbox2ltrb(cls, b):
return [int(round(v, 0)) for v in xywh2ltrb(b)]
def _get_images_df(self):
""" 从gt['images']转df
"""
df = pd.DataFrame.from_dict(self.gt_dict['images'])
df.rename(columns={'id': 'image_id'}, inplace=True)
df.set_index('image_id', inplace=True)
return df
def _get_categories_df(self):
""" 把gt['categories']转df
"""
df = pd.DataFrame.from_dict(self.gt_dict['categories'])
df.rename(columns={'id': 'category_id'}, inplace=True)
df.set_index('category_id', inplace=True)
return df
def _get_gt_anns_df(self):
""" 输入gt的json文件或字典,转df格式
# TODO 暂时没考虑iscrowd=1的情况,先不处理这个字段
"""
# 1 读取数据,转字典
df = | pd.DataFrame.from_dict(self.gt_dict['annotations']) | pandas.DataFrame.from_dict |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
diego_benchmark/diego_titanic.py was created on 2019/04/06.
file in :relativeFile
Author: Charles_Lai
Email: <EMAIL>
"""
from sklearn.preprocessing import LabelEncoder
from base_benchmark import simple_diego
import pandas as pd
import numpy as np
train_df_raw = pd.read_csv('data/titanic/train.csv')
test_df_raw = pd.read_csv('data/titanic/test.csv')
def preprocess_data(df):
processed_df = df
########## Deal with missing values ##########
# As we saw before, the two missing values for embarked columns can be replaced by 'C' (Cherbourg)
processed_df['Embarked'].fillna('C', inplace=True)
# We replace missing ages by the mean age of passengers who belong to the same group of class/sex/family
processed_df['Age'] = processed_df.groupby(['Pclass','Sex','Parch','SibSp'])['Age'].transform(lambda x: x.fillna(x.mean()))
processed_df['Age'] = processed_df.groupby(['Pclass','Sex','Parch'])['Age'].transform(lambda x: x.fillna(x.mean()))
processed_df['Age'] = processed_df.groupby(['Pclass','Sex'])['Age'].transform(lambda x: x.fillna(x.mean()))
# We replace the only missing fare value for test processed_df and the missing values of the cabin column
processed_df['Fare'] = processed_df['Fare'].interpolate()
processed_df['Cabin'].fillna('U', inplace=True)
########## Feature engineering on columns ##########
# Create a Title column from name column
    processed_df['Title'] = pd.Series((name.split('.')[0].split(',')[1].strip() for name in processed_df['Name']), index=processed_df.index)
processed_df['Title'] = processed_df['Title'].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
processed_df['Title'] = processed_df['Title'].replace(['Mlle', 'Ms'], 'Miss')
processed_df['Title'] = processed_df['Title'].replace('Mme', 'Mrs')
# Create a Family Size, Is Alone, Child and Mother columns
processed_df['FamilySize'] = processed_df['SibSp'] + processed_df['Parch'] + 1
    processed_df.loc[processed_df['FamilySize'].between(1, 5, inclusive=False), 'FamilySize'] = 2
    processed_df.loc[processed_df['FamilySize'] > 5, 'FamilySize'] = 3
processed_df['IsAlone'] = np.where(processed_df['FamilySize']!=1, 0, 1)
processed_df['IsChild'] = processed_df['Age'] < 18
processed_df['IsChild'] = processed_df['IsChild'].astype(int)
# Modification of cabin column to keep only the letter contained corresponding to the deck of the boat
processed_df['Cabin'] = processed_df['Cabin'].str[:1]
processed_df['Cabin'] = processed_df['Cabin'].map({cabin: p for p, cabin in enumerate(set(cab for cab in processed_df['Cabin']))})
#Continuous variable bins; qcut vs cut: https://stackoverflow.com/questions/30211923/what-is-the-difference-between-pandas-qcut-and-pandas-cut
#Fare Bins/Buckets using qcut or frequency bins: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.qcut.html
processed_df['FareBin'] = pd.qcut(processed_df['Fare'], 4)
#Age Bins/Buckets using cut or value bins: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
processed_df['AgeBin'] = pd.cut(processed_df['Age'].astype(int), 5)
label = LabelEncoder()
# Converting.
processed_df['Sex_Code'] = label.fit_transform(processed_df['Sex'])
processed_df['Embarked_Code'] = label.fit_transform(processed_df['Embarked'])
processed_df['Title_Code'] = label.fit_transform(processed_df['Title'])
processed_df['AgeBin_Code'] = label.fit_transform(processed_df['AgeBin'])
processed_df['FareBin_Code'] = label.fit_transform(processed_df['FareBin'])
dummy_cols = ['Sex','Pclass', 'Embarked', 'Title','SibSp', 'Parch', 'Age', 'Fare', 'FamilySize', 'IsAlone']
data1_dummy = pd.get_dummies(processed_df[dummy_cols])
print(data1_dummy.columns)
data1_dummy['PassengerId'] = processed_df['PassengerId']
# converting
processed_df['Title'] = processed_df['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5})
# Filling Age missing values with mean age of passengers who have the same title
processed_df['Age'] = processed_df.groupby(['Title'])['Age'].transform(lambda x: x.fillna(x.mean()))
# Transform categorical variables to numeric variables
processed_df['Sex'] = processed_df['Sex'].map({'male': 0, 'female': 1})
processed_df['Embarked'] = processed_df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})
processed_df = pd.merge(processed_df, data1_dummy, on=['PassengerId'])
# Create a ticket survivor column which is set to 1 if an other passenger with the same ticket survived and 0 else
    # Note: this implementation is ugly and inefficient; if somebody finds a way to do it easily with pandas (there must be one), please comment the kernel with your solution!
processed_df['TicketSurvivor'] = | pd.Series(0, index=processed_df.index) | pandas.Series |
"""Free functions to help out with EFD operations.
"""
from astropy.time import Time
import numpy
import pandas
def merge_packed_time_series(packed_dataframe, base_field, stride=1,
ref_timestamp_col="cRIO_timestamp", fmt='unix_tai',
scale='tai'):
"""Select fields that are time samples and unpack them into a dataframe.
Parameters
----------
packed_dataframe : `pandas.DataFrame`
packed data frame containing the desired data
base_field : `str`
Base field name that will be expanded to query all
vector entries.
stride : `int`, optional
Only use every stride value when unpacking. Must be a factor
of the number of packed values.
(1 by default)
ref_timestamp_col : `str`, optional
Name of the field name to use to assign timestamps to unpacked
vector fields (default is 'cRIO_timestamp').
fmt : `str`, optional
Format to give to the `astropy.Time` constructor. Defaults to
'unix_tai' since most internal timestamp columns are in TAI.
scale : `str`, optional
Time scale to give to the `astropy.Time` constructor. Defaults to
'tai'.
Returns
-------
result : `pandas.DataFrame`
A `pandas.DataFrame` containing the results of the query.
"""
packed_fields = [k for k in packed_dataframe.keys() if k.startswith(base_field) and k[len(base_field):].isdigit()]
packed_fields = sorted(packed_fields, key=lambda k: int(k[len(base_field):])) # sort by pack ID
npack = len(packed_fields)
if npack % stride != 0:
raise RuntimeError(f"Stride must be a factor of the number of packed fields: {stride} v. {npack}")
packed_len = len(packed_dataframe)
n_used = npack//stride # number of raw fields being used
output = numpy.empty(n_used * packed_len)
times = numpy.empty_like(output, dtype=packed_dataframe[ref_timestamp_col][0])
if packed_len == 1:
dt = 0
else:
dt = (packed_dataframe[ref_timestamp_col][1] - packed_dataframe[ref_timestamp_col][0])/npack
for i in range(0, npack, stride):
i0 = i//stride
output[i0::n_used] = packed_dataframe[f"{base_field}{i}"]
times[i0::n_used] = packed_dataframe[ref_timestamp_col] + i*dt
timestamps = Time(times, format=fmt, scale=scale)
return pandas.DataFrame({base_field: output, "times": times}, index=timestamps.utc.datetime64)
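# Minimal usage sketch for merge_packed_time_series (the column names below are
# illustrative, not part of this module):
#
#   packed = pandas.DataFrame({
#       "cRIO_timestamp": [1600000000.0, 1600000001.0],
#       "val0": [0.0, 2.0],
#       "val1": [1.0, 3.0],
#   })
#   df = merge_packed_time_series(packed, "val")
#   # df has len(packed) * 2 rows -- one per unpacked sample -- with a "val"
#   # column and per-sample UTC timestamps as the index.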
def resample(df1, df2, interp_type='time'):
"""Resample one DataFrame onto another.
Parameters
----------
df1 : `pandas.DataFrame`
First `pandas.DataFrame`.
df2 : `pandas.DataFrame`
Second `pandas.DataFrame`.
interp_type : `str`, optional
Type of interpolation to perform (default is 'time').
Returns
-------
result : `pandas.DataFrame`
The resulting resampling is bi-directional.
That is the length of the resulting `pandas.DataFrame` is the
sum of the lengths of the inputs.
"""
df = pandas.concat([df1, df2], axis=1) # Sort in this context does not sort the data
df = df.sort_index()
    return df.interpolate(method=interp_type)
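# Sketch of resample() on two time-indexed frames (toy data; both inputs are
# assumed to carry a DatetimeIndex so that 'time' interpolation applies):
#
#   idx1 = pandas.date_range("2021-01-01", periods=3, freq="1min")
#   idx2 = pandas.date_range("2021-01-01 00:00:30", periods=3, freq="1min")
#   a = pandas.DataFrame({"a": [0.0, 1.0, 2.0]}, index=idx1)
#   b = pandas.DataFrame({"b": [10.0, 11.0, 12.0]}, index=idx2)
#   both = resample(a, b)
#   # len(both) == len(a) + len(b); each column is interpolated onto the union index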
def rendezvous_dataframes(left, right, direction='backward', tolerance=pandas.Timedelta(days=20)):
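    # The body of this function is not present in this excerpt; the lines below
    # are a minimal sketch of what the signature suggests -- an asof-style join
    # of the two frames on their (time) index via pandas.merge_asof -- and not
    # the verified original implementation.
    return pandas.merge_asof(left, right, left_index=True, right_index=True,
                             direction=direction, tolerance=tolerance)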
import pytest
from pandas.compat import pa_version_under4p0
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
PeriodArray,
period_array,
)
pa = pytest.importorskip("pyarrow", minversion="1.0.1")
def test_arrow_extension_type():
from pandas.core.arrays._arrow_utils import ArrowPeriodType
p1 = ArrowPeriodType("D")
p2 = ArrowPeriodType("D")
p3 = ArrowPeriodType("M")
assert p1.freq == "D"
assert p1 == p2
assert not p1 == p3
assert hash(p1) == hash(p2)
assert not hash(p1) == hash(p3)
@pytest.mark.parametrize(
"data, freq",
[
(pd.date_range("2017", periods=3), "D"),
(pd.date_range("2017", periods=3, freq="A"), "A-DEC"),
],
)
def test_arrow_array(data, freq):
from pandas.core.arrays._arrow_utils import ArrowPeriodType
periods = period_array(data, freq=freq)
result = pa.array(periods)
assert isinstance(result.type, ArrowPeriodType)
assert result.type.freq == freq
expected = pa.array(periods.asi8, type="int64")
assert result.storage.equals(expected)
# convert to its storage type
result = pa.array(periods, type=pa.int64())
assert result.equals(expected)
# unsupported conversions
msg = "Not supported to convert PeriodArray to 'double' type"
with pytest.raises(TypeError, match=msg):
pa.array(periods, type="float64")
with pytest.raises(TypeError, match="different 'freq'"):
pa.array(periods, type=ArrowPeriodType("T"))
def test_arrow_array_missing():
from pandas.core.arrays._arrow_utils import ArrowPeriodType
arr = PeriodArray([1, 2, 3], freq="D")
arr[1] = pd.NaT
result = pa.array(arr)
assert isinstance(result.type, ArrowPeriodType)
assert result.type.freq == "D"
expected = pa.array([1, None, 3], type="int64")
assert result.storage.equals(expected)
@pytest.mark.xfail(
pa_version_under4p0, reason="pyarrow incorrectly uses pandas internals API"
)
def test_arrow_table_roundtrip():
from pandas.core.arrays._arrow_utils import ArrowPeriodType
    arr = PeriodArray([1, 2, 3], freq="D")
import numpy as np
import pytest
from pandas import IntervalIndex, Series, period_range
import pandas._testing as tm
class TestValues:
@pytest.mark.parametrize(
"data",
[
period_range("2000", periods=4),
IntervalIndex.from_breaks([1, 2, 3, 4]),
],
)
def test_values_object_extension_dtypes(self, data):
# https://github.com/pandas-dev/pandas/issues/23995
        result = Series(data)
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
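# Example of the `tuples` format consumed above (values are illustrative):
#   create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp("2015-01-05"),
#       sids=[0, 1],
#       tuples=[(0, 100.0, pd.Timestamp("2015-01-05")),
#               (1, 200.0, pd.Timestamp("2015-01-06"))],
#       end_date=pd.Timestamp("2015-01-08"),
#   )
#   # -> one forward-filled column per sid, indexed by (at_date, knowledge_date)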
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
                        (20, 221, pd.Timestamp("2015-02-10")),
import csv
import json
import matplotlib.backends.backend_pdf as p
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mpld3
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
from matplotlib import pyplot
from mpld3 import plugins
from mpld3 import utils
from urllib3.connectionpool import xrange
from adminapp.models import *
class ClickInfo(plugins.PluginBase):
"""Plugin for getting info on click"""
JAVASCRIPT = """
mpld3.register_plugin("clickinfo", ClickInfo);
ClickInfo.prototype = Object.create(mpld3.Plugin.prototype);
ClickInfo.prototype.constructor = ClickInfo;
ClickInfo.prototype.requiredProps = ["id"];
function ClickInfo(fig, props){
mpld3.Plugin.call(this, fig, props);
};
ClickInfo.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
obj.elements().on("mousedown", function(d, i){
if (!clicked) {
url = window.location.href;
var pathArray = window.location.pathname.split('/');
if (pathArray.length == 5) {
newUrl = url + "/obj=" + i;
clicked = true;
}
else {
newUrl = url.replace("/" + pathArray[5], "/obj=" + i)
clicked = true;
}
document.location.href = newUrl;
}
});
}
"""
def __init__(self, points):
self.dict_ = {"type": "clickinfo",
"id": utils.get_id(points)}
class Visualizer:
def get_classifer_visualization(self, iteration_id, subspace_id, selected_obj, *args):
fig, ax = plt.subplots()
iteration = Iteration.objects.get(id=iteration_id)
setup = iteration.session_id.setup_id
subspace_order = 0
subspaces = Subspace.objects.filter(setup_id_id=setup.id).order_by('id')
features = []
dataset_type = setup.dataset_id.type
# find subspace order manually
for ss in subspaces:
if ss.id != int(subspace_id):
subspace_order += 1
else:
break
# this will be used in "feature_data_visible == no", filtering the visible data
for ss in subspaces:
if dataset_type == "HIPE":
if not ss.feature_x_id in features:
features.append(ss.feature_x_id)
if not ss.feature_y_id in features:
features.append(ss.feature_y_id)
elif dataset_type == "MNIST":
if not (ss.feature_x_id - 1) in features:
features.append((ss.feature_x_id - 1))
if not (ss.feature_y_id - 1) in features:
features.append((ss.feature_y_id - 1))
if dataset_type == 'HIPE':
feature_x_id = subspaces[subspace_order].feature_x_id
feature_y_id = subspaces[subspace_order].feature_y_id
elif dataset_type == 'MNIST':
feature_x_id = subspaces[subspace_order].feature_x_id - 1
feature_y_id = subspaces[subspace_order].feature_y_id - 1
feature_file = setup.dataset_id.feature_file.path
ocal_output = iteration.ocal_output
dict = json.loads(ocal_output)
subspace_gridpoints_all = json.loads(setup.subspaces_gridpoints_JSON)
labels = dict["prediction_subspaces"][subspace_order]
xy = []
x = []
y = []
timestamps = []
with open(feature_file) as f:
plots = csv.reader(f, delimiter=',')
headers = next(plots)
# get the x and y values for the graph
for row in plots:
timestamps.append(row[0])
x.append(float(row[feature_x_id]))
y.append(float(row[feature_y_id]))
xy.append(row)
# add some padding for a nicer view
paddingX = (max(x) - min(x)) / 10
paddingY = (max(y) - min(y)) / 10
plt.xlim(min(x) - paddingX, max(x) + paddingX)
plt.ylim(min(y) - paddingY, max(y) + paddingY)
# bring the points in (x,y) form
objects = np.column_stack((x, y))
w = list(objects)
        # bring the points to a valid form for scatter()
points = np.array(w).astype("float")
label_values = np.array([])
counter = 0
ocal_selection_exists = False
# coloring the inliers and outliers
for label in labels:
value = -1
if counter == selected_obj:
value = 2 # the query-id to be questioned
elif counter == args[0] and setup.feedback_mode == "hybrid":
value = 3 # OCAL-Selection
ocal_selection_exists = True
elif label == "inlier":
value = 1
elif label == "outlier":
value = 0
label_values = np.append(label_values, value)
counter += 1
label_color = ['green' if i == 0 else 'blue' if i == 1 else 'yellow' if i == 3 else 'red' for i in label_values]
red_patch = mpatches.Patch(color='red', label='Selection')
blue_patch = mpatches.Patch(color='blue', label='Inlier')
green_patch = mpatches.Patch(color='green', label='Outlier')
if ocal_selection_exists and setup.feedback_mode == "hybrid":
yellow_patch = mpatches.Patch(color='yellow', label='OCAL Selection')
plt.legend(handles=[blue_patch, green_patch, red_patch, yellow_patch])
else:
plt.legend(handles=[blue_patch, green_patch, red_patch])
scatter = plt.scatter(points[:, 0], points[:, 1], c=label_color)
all_subspace_gridpoints = subspace_gridpoints_all["visualization"]
subspace_gridpoints = all_subspace_gridpoints[subspace_order]
# create gridpoints
xx, yy = np.meshgrid(subspace_gridpoints[0],
subspace_gridpoints[1])
# Put the result into a color plot
# bring the scores to a valid form for contour()
Z = np.array(dict["score_subspace_grids"][subspace_order])
Z[Z > 0] = 1
Z[Z <= 0] = 0
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, cmap=plt.cm.Paired)
# plot the grid
plt.grid(linestyle='dotted')
# plot the labels
xlabel = plt.xlabel(headers[feature_x_id], fontsize=22)
ylabel = plt.ylabel(headers[feature_y_id], fontsize=22)
df = pd.read_csv(feature_file, sep=',', skiprows=0, header=0)
tooltip_content = []
# make tooltips
for i in range(len(objects)):
            label = df.iloc[[i], :].T  # .ix is deprecated; positional selection is intended here
label.columns = ['Object {0}'.format(i)]
# .to_html() is unicode; so make leading 'u' go away with str()
tooltip_content.append(str(label.to_html()))
if setup.feature_data_visible == "Yes":
# all feature data is available in tooltip
tooltip = mpld3.plugins.PointHTMLTooltip(scatter, tooltip_content,
voffset=-300, hoffset=10)
else:
# only x and y features are available in tooltip
minimal_labels = [
[headers[feature_x_id] + ": " + str(i[0]) + ", " + headers[feature_y_id] + ": " + str(i[1])]
for i in objects]
if dataset_type == "HIPE":
for i in range(len(timestamps)):
                    # also append ids in the restricted tooltip so points are easier to identify
minimal_labels[i].append(" id: " + str(timestamps[i]))
tooltip = mpld3.plugins.PointHTMLTooltip(points=scatter, labels=minimal_labels, hoffset=10)
a = []
# only send available subspaces to the information field
if selected_obj != None:
if dataset_type == "HIPE":
a.append(xy[selected_obj][0]) # id
for feature in features:
a.append(xy[selected_obj][feature])
xy = a
b = []
if dataset_type == "HIPE":
b.append(headers[0]) # id
for feature in features:
b.append(headers[feature])
headers = b
mousepos = mpld3.plugins.MousePosition(fontsize=12, fmt='.3g')
mpld3.plugins.connect(fig, tooltip, mousepos)
if setup.feedback_mode != "system":
mpld3.plugins.connect(fig, ClickInfo(scatter))
figid = "graph_subspace_" + subspace_id + "_div"
html_graph = mpld3.fig_to_html(fig, figid=figid, template_type="best")
plt.close()
return html_graph, xy, headers, figid, tooltip_content, subspace_id
def get_raw_data_visualization(self, dataset, obj_id):
if dataset.type == 'MNIST':
raw_file = dataset.raw_file
file_text = raw_file.read()
dict_pixels = json.loads(file_text)
fig_pixels = dict_pixels[obj_id]
pixels = list()
for pixel in fig_pixels:
new_pixel = 255 * (1.0 - pixel)
pixels.append(new_pixel)
pixels = np.array(pixels)
pixels.resize((28, 28))
im = Image.fromarray(pixels.astype(np.uint8), mode='L')
im = im.resize((140, 140))
im = ImageOps.expand(im, border=50, fill='black')
path = "media/mnist_" + str(dataset.id) + "_" + str(obj_id) + ".png"
im.save(path) # temp save
return path
elif dataset.type == 'HIPE':
plt.close("all")
with open(dataset.raw_file.path) as data_file:
hipejson = json.load(data_file)
hipedf = pd.DataFrame(hipejson)
hipedf_t = hipedf.T
hipedf_t.iloc[obj_id]['SensorDateTime'] = pd.to_datetime(hipedf_t.iloc[obj_id]['SensorDateTime'],
errors='coerce')
hipedf_t.iloc[obj_id]['SensorDateTime'] = hipedf_t.iloc[obj_id]['SensorDateTime'].to_pydatetime()
for col in hipedf_t.columns:
if col != 'SensorDateTime':
fig = pyplot.figure()
                    hipedf_t.iloc[obj_id][col] = pd.to_numeric(hipedf_t.iloc[obj_id][col])
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.base import BaseEstimator, TransformerMixin
from karura.core.insight import Insight
from karura.core.dataframe_extension import FType
class DatetimeToCategoricalInsight(Insight):
def __init__(self):
super().__init__()
self.index.as_preprocessing()
self.automatic = True
self._categorized = []
def adopt(self, dfe, interpreted=None):
targets = self.get_insight_targets(dfe)
self._categorized = targets
for t in targets:
month = dfe.df[t].dt.month.rename(t + "_month").astype("category")
day = dfe.df[t].dt.day.rename(t + "_day").astype("category")
            dfe.df = pd.concat([dfe.df, month, day], axis=1)
from os import path
from keras.engine.saving import model_from_json
from pandas import read_csv, get_dummies, DataFrame
from keras import models, layers, optimizers
from matplotlib.pyplot import figure, ylabel, plot, legend, show, xlabel
from numpy import reshape, argmax, ndarray
# These are the file names our model is
# going to be stored as
model_struct_f = "model.json"
weights_f = "weights.h5"
# If both of the required files exist, then we
# recreate our pre-trained model from them and
# ask for user input of a vector
if path.isfile(model_struct_f) and path.isfile(weights_f):
print("Loading model struct and weights")
with open(model_struct_f, 'r') as f:
model = model_from_json(f.read())
model.load_weights(weights_f)
# This lets us know that the model is ready
print("The model is ready to make predictions!\nInput four floats delimited with spaces: (eg.: 6.9 3.1 4.9 1.5)\n")
# Read four digits from stdin into an array
# and reshape it to our model's input shape
array = reshape(list(map(float, input().split())), (1, 4))
# This will return an (1, 3) shape array with each column
# representing how likely the input belongs to each class
probabilities = model.predict(array)
# Get the index of the highest value in our probabilities array
# This will also be the index of the category or class name
max_arg_i = argmax(probabilities)
    # If max_arg_i is an array it means there was more than one max
    # prob. value. In this case, just use the first one as the index
    if isinstance(max_arg_i, ndarray):
max_arg_i = max_arg_i[0]
# These are just for pretty-printing the output in a human
# readable format
cat = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'][max_arg_i]
    # predict() returns class probabilities in [0, 1]; scale to a percentage for display
    confidence = probabilities[0, max_arg_i] * 100
print("I think it's an %s with %.3f%% confidence" % (cat, confidence))
else:
# Load data from the internet
data = read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
    # Shuffle the rows so the class labels are not ordered in the training data
data = data.sample(frac=1).reset_index(drop=True)
# Let's take a look at our data
print(data.head(10))
    # the 0th, 1st, 2nd, 3rd cols are the feature columns
feature_cols = [0, 1, 2, 3]
label_cols = [4]
# replace missing values with the median of all values
# in each feature column
    # assign back instead of using inplace on a column slice, which would not modify `data`
    data[feature_cols] = data[feature_cols].fillna(data[feature_cols].dropna().median())
# separate features and labels and OHE encode labels
# We could use column slicing like this:
# x = data.values[:, :3] # train data
# y = data.values[:, 4] # labels
# but since we have already named our feature columns, we can say:
x = data[feature_cols].values
# We could now say, well the labels are all the data cols except
# the feature columns like this:
# y = data[data.columns.difference(feature_cols)].values
# but it's much easier to just define the index of the label columns
# y = data[label_cols] <--- omitted ---> to avoid code duplication
    y = get_dummies(data[label_cols], columns=label_cols)
import numpy as np
import pandas as pd
from candidate_handler import candidates_update
# Interdependent modules: it has to be imported like below, otherwise it won't work
import solver as solver
from cells_seen import cells_seen
import itertools
#%% Simple Colouring (Singles Chains)
# finds and returns conjugate pairs (strong links)
def conjugate_pairs(cands,board):
# find strong links for each value
strlinx_rows = []
strlinx_cols = []
strlinx_boxs = []
for val in range(1,10):
#go through all rows
strlinx_row = []
for row in range(9):
use = cands.loc[row].dropna()
temp = []
for i in use:
temp.extend(i)
valco = pd.Series(temp).value_counts()
try:
if valco.loc[val] == 2:
inx_temp = use.apply(lambda x: val in x)
inx = inx_temp.index[inx_temp == True]
# print(f"value: {val} row: {row} strong link columns: {inx}")
strlinx_row.append([(row,inx[0]),(row,inx[1])])
except:
pass
strlinx_rows.append(strlinx_row)
#go through all cols
strlinx_col = []
for col in range(9):
use = cands[col].dropna()
temp = []
for i in use:
temp.extend(i)
valco = pd.Series(temp).value_counts()
try:
if valco.loc[val] == 2:
inx_temp = use.apply(lambda x: val in x)
inx = inx_temp.index[inx_temp == True]
# print(f"value: {val} col: {col} strong link indexes: {inx}")
strlinx_col.append([(inx[0],col),(inx[1],col)])
except:
pass
strlinx_cols.append(strlinx_col)
#go through all boxes
strlinx_box = []
for i in [[0,1,2],[3,4,5],[6,7,8]]:
for j in [[0,1,2],[3,4,5],[6,7,8]]:
use_box = cands.iloc[i,j]
use = pd.Series(use_box.values.flatten()).dropna()
temp = []
for ix in use:
temp.extend(ix)
valco = pd.Series(temp).value_counts()
try:
if valco.loc[val] == 2:
tempinx = []
for ir in use_box.index:
for ic in use_box.columns:
if board.iloc[ir,ic] == ".":
if val in use_box.loc[ir][ic]:
# print(f"value: {val} strong link indexes: R{ir}C{ic}")
tempinx.append((ir,ic))
strlinx_box.append(tempinx)
except:
pass
strlinx_boxs.append(strlinx_box)
return strlinx_rows,strlinx_cols,strlinx_boxs
#auxiliary function used by the recursive function
def colour_cell(boxrowcol,i,search,match_matrix,val,sl_uniq,strlinx_rows,strlinx_cols,strlinx_boxs):
i = pd.Series(i)
inx = i.index[i == search]
try:
pair = i[abs(inx-1)].tolist()[0]
if len(pair) and pd.isnull(match_matrix.iloc[pair]):
# print(f"{boxrowcol}: Connection from {search} to {pair} for {val+1}")
#colour conjugate pairs with the same group but different colour
group = match_matrix.iloc[search][1]
colour = colours[abs(colours.index[match_matrix.iloc[search][0] == colours][0]-1)]+str(group)
match_matrix.iloc[pair] = colour
match_matrix = simple_colouring(val,pair,strlinx_rows,strlinx_cols,strlinx_boxs,sl_uniq,match_matrix)
except:
pass
return match_matrix
#the recursive function searches for the cell
def simple_colouring(val,search,strlinx_rows,strlinx_cols,strlinx_boxs,sl_uniq,match_matrix):
#first start searching it in the boxes
for i in strlinx_boxs[val]:
match_matrix = colour_cell("box",i,search,match_matrix,val,sl_uniq,strlinx_rows,strlinx_cols,strlinx_boxs)
#then rows
for i in strlinx_rows[val]:
match_matrix = colour_cell("row",i,search,match_matrix,val,sl_uniq,strlinx_rows,strlinx_cols,strlinx_boxs)
#then cols
for i in strlinx_cols[val]:
match_matrix = colour_cell("col",i,search,match_matrix,val,sl_uniq,strlinx_rows,strlinx_cols,strlinx_boxs)
return match_matrix
def singles_chains_eliminate(sl_uniq,match_matrix,rem,cands,square_pos):
ischanged = 0
for rows in cands.index:
for cols in cands.columns:
use = cands.iloc[rows,cols]
try:
                chk = not pd.isnull(use)
import unittest
from abc import ABC
import numpy as np
import pandas as pd
from toolbox.ml.ml_factor_calculation import ModelWrapper, calc_ml_factor, generate_indexes
from toolbox.utils.slice_holder import SliceHolder
class MyTestCase(unittest.TestCase):
def examples(self):
# index includes non trading days
# exactly 60 occurrences of each ticker
first = pd.Timestamp(year=2010, month=1, day=1)
self.date_index = pd.MultiIndex.from_product(
[pd.date_range(start=first, end=pd.Timestamp(year=2010, month=3, day=1)),
['BOB', 'JEFF', 'CARL']], names=['date', 'symbol'])
self.expected_index_e5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first, first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first, first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_e7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=37)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=52))),
(SliceHolder(first, first + pd.Timedelta(days=45)),
SliceHolder(first + pd.Timedelta(days=53), first + pd.Timedelta(days=59))),
]
self.expected_index_e5_10_30 = self.turn_to_datetime64(self.expected_index_e5_10_30)
self.expected_index_e7_8_30 = self.turn_to_datetime64(self.expected_index_e7_8_30)
self.expected_index_r5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first + pd.Timedelta(days=5), first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first + pd.Timedelta(days=10), first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first + pd.Timedelta(days=15), first + pd.Timedelta(days=44)),
             SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
#!/usr/bin/python
import sys
import re
import pandas as pd
import numpy as np
native_readcount_fwd = pd.read_csv(sys.argv[1], sep="\t", header=None, skiprows = [0,1,2], usecols=range(5), index_col = 0, names=['Position','A','C','G','T'])
native_readcount_rev = pd.read_csv(sys.argv[2], sep="\t", header=None, skiprows = [0,1,2], usecols=range(5), index_col = 0, names=['Position','A','C','G','T'])
pcr_readcount_fwd = pd.read_csv(sys.argv[3], sep="\t", header=None, skiprows = [0,1,2], usecols=range(5), index_col = 0, names=['Position','A','C','G','T'])
pcr_readcount_rev = pd.read_csv(sys.argv[4], sep="\t", header=None, skiprows = [0,1,2], usecols=range(5), index_col = 0, names=['Position','A','C','G','T'])
ref_fasta = sys.argv[5]
output_file = sys.argv[6]
with open(ref_fasta, 'r') as ref:
ref_seq=''
for line in ref:
if line[0] == '>':
chrom_name = line.rstrip().split(' ')[0][1:]
if line[0] != '>':
ref_seq += line.rstrip()
nrow = len(ref_seq)
if len(native_readcount_fwd) < nrow:
missing_row_index = list(set(range(1,nrow + 1)) - set(native_readcount_fwd.index.tolist()))
df_with_missing_row = pd.DataFrame(np.zeros((len(missing_row_index),len(native_readcount_fwd.columns.tolist()))),
index = missing_row_index,
columns = native_readcount_fwd.columns.tolist())
native_readcount_fwd = pd.concat([native_readcount_fwd, df_with_missing_row]).sort_index()
if len(native_readcount_rev) < nrow:
missing_row_index = list(set(range(1,nrow + 1)) - set(native_readcount_rev.index.tolist()))
df_with_missing_row = pd.DataFrame(np.zeros((len(missing_row_index),len(native_readcount_rev.columns.tolist()))),
index = missing_row_index,
columns = native_readcount_rev.columns.tolist())
native_readcount_rev = pd.concat([native_readcount_rev, df_with_missing_row]).sort_index()
if len(pcr_readcount_fwd) < nrow:
missing_row_index = list(set(range(1,nrow + 1)) - set(pcr_readcount_fwd.index.tolist()))
df_with_missing_row = pd.DataFrame(np.zeros((len(missing_row_index),len(pcr_readcount_fwd.columns.tolist()))),
index = missing_row_index,
columns = pcr_readcount_fwd.columns.tolist())
pcr_readcount_fwd = pd.concat([pcr_readcount_fwd, df_with_missing_row]).sort_index()
if len(pcr_readcount_rev) < nrow:
missing_row_index = list(set(range(1,nrow + 1)) - set(pcr_readcount_rev.index.tolist()))
df_with_missing_row = pd.DataFrame(np.zeros((len(missing_row_index),len(pcr_readcount_rev.columns.tolist()))),
index = missing_row_index,
columns = pcr_readcount_rev.columns.tolist())
    pcr_readcount_rev = pd.concat([pcr_readcount_rev, df_with_missing_row]).sort_index()
""" RNA-Seq analysis pipeline following the methods used by ICGC.
This script works specifically for the mice data on the JLU server.
Some of the functions could be rewritten for general use.
Function:
1. Walk through DATA_DIR and generate a design matrix.
2. Concatenate samples with multiple fastq files (splitted lanes).
3. Run fastp on all fq.gz files, and store the trimmed file under RES_DIR/data.
Reports of fastp are stored in RES_DIR/fastp.
4. Align each sample using a two-pass method with STAR
Output:
Under `RES_DIR/data` and `RES_DIR/fastp`, we have the trimmed FASTQ files and their
corresponding fastp reports. MultiQC reports is under `RES_DIR/fastp/multiqc`.
Under `RES_DIR/bam`, each sample should have its own sub-directory containing
the following files with the `sample_group` as a prefix:
- Aligned.out.bam: all genomic alignments including chimeric and unaligned reads
- Aligned.toTranscriptome.out.bam: aligned reads with transcript coordinates rather than genomic coordinates
- Chimeric.out.junction: reads that were mapped to different chromosomes or strands (fusion alignments)
- SJ.out.tab: high confidence collapsed splice junctions
- Log(.final|.progress).out
Under `RES_DIR/counts`, the counts produced by STAR is moved here.
See https://www.biostars.org/p/218995/. In our case cols 1 and 2 should be kept.
Under `RES_DIR/tpm`, TPM values produced by Salmon is stored.
Ensembl transcript IDs are used because it's mapped to the reference transcriptome.
Software and data:
- fastp v0.20.0: https://github.com/OpenGene/fastp
- FastQC v0.11.9: http://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.9.zip
- multiQC v1.8: https://multiqc.info
- STAR v2.7.2b: https://github.com/alexdobin/STAR
- Salmon v1.1.0: https://github.com/COMBINE-lab/salmon/releases/download/v1.1.0/salmon-1.1.0_linux_x86_64.tar.gz
- Reference genome: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M24/GRCm38.primary_assembly.genome.fa.gz
- Reference transcriptome: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M24/gencode.vM24.transcripts.fa.gz
- Gene annotation: ftp://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_mouse/release_M24/gencode.vM24.annotation.gtf.gz
References:
https://docs.gdc.cancer.gov/Data/Bioinformatics_Pipelines/Expression_mRNA_Pipeline/
https://github.com/akahles/icgc_rnaseq_align/blob/master/star_align.py
https://salmon.readthedocs.io/en/latest/salmon.html
https://combine-lab.github.io/alevin-tutorial/2019/selective-alignment/
Choice of software: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4728800/
"""
import glob
import gzip
import logging
import os
import re
import shutil
import subprocess
from typing import List
import pandas as pd
# Filepath variables
WORK_DIR = os.path.expanduser("~/rna_seq")
DATA_DIR = os.path.join(WORK_DIR, "mm_liver_data")
RES_DIR = os.path.join(WORK_DIR, "results")
REFERENCE_GENOME_PATH = os.path.join(WORK_DIR, "genome", "GRCm38.p6.genome.fa")
REFERENCE_TRANSCRIPTOME_PATH = os.path.join(
WORK_DIR, "genome", "gencode.vM24.transcripts.fa"
)
GENCODE_PATH = os.path.join(WORK_DIR, "genome", "gencode.vM24.annotation.gtf")
STAR_INDEX_DIR = os.path.join(WORK_DIR, "star_index")
FASTP_PATH = os.path.expanduser("~/pkg/bin/fastp")
FASTQC_PATH = os.path.expanduser("~/pkg/bin/fastqc")
MULTIQC_PATH = os.path.expanduser("~/.local/bin/multiqc")
STAR_PATH = os.path.expanduser("~/pkg/bin/STAR")
SALMON_PATH = os.path.expanduser("~/pkg/bin/salmon/bin/salmon")
for d in [
f"{RES_DIR}/data",
f"{RES_DIR}/fastp/multiqc",
f"{RES_DIR}/fastqc",
f"{RES_DIR}/bam",
f"{RES_DIR}/counts",
f"{RES_DIR}/tpm",
STAR_INDEX_DIR,
]:
os.makedirs(d, exist_ok=True)
# Log settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(f"{__file__.rstrip('.py')}.log"))
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s [%(levelname)s]{%(lineno)d:%(funcName)12s()} %(message)s",
"%Y-%m-%d %H:%M:%S",
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def ListFastqFiles(group: str = "original") -> List[List[str]]:
""" List fastq files to be analyzed in the current working directory.
Folders could contain multiple files for the same sample,
which should be joined as they are from multiple lanes.
Returns:
        A list of lists of absolute sample path prefixes (with the trailing
        _[1|2].fq.gz removed). Each sublist contains samples found in the
        same directory.
"""
# Find the lowest-level directories containing the files
all_files = glob.glob(f"{DATA_DIR}/{group}/**/FCH*.fq.gz", recursive=True)
dir_names = set((os.path.dirname(x) for x in all_files))
res = []
for dir_name in dir_names:
# Assuming we don't have any missing files
uniq_samples = list(
set(
(
re.sub(r"^(.*)_[12]\.fq\.gz$", r"\1", x)
for x in glob.glob(f"{dir_name}/FCH*.fq.gz")
)
)
)
logger.debug(f"Found {len(uniq_samples)} for {dir_name}")
res.append(uniq_samples)
return res
def Samples2DesignMatrix(samples: List[List[str]]) -> pd.DataFrame:
""" Convert the samples list to a design matrix.
Args:
samples (List[List[str]]): return from function `ListFastqFiles`.
Returns:
pd.DataFrame: a design matrix with the following columns:
- timepoint: 1 - 9
- sample_type: Control / Model / Genpin
- sample_group: e.g. original/T1_C/11_90bp
- filename: comma-separated filenames (_[1|2] removed)
"""
_samples = [x for subl in samples for x in subl]
_samples = [re.sub(f"{DATA_DIR}/", "", x) for x in _samples]
filenames = [os.path.basename(x) for x in _samples]
sample_groups = [os.path.dirname(x) for x in _samples]
possible_sample_types = {"C": "Control", "M": "Model", "G": "Genpin"}
timepoints = []
sample_type = []
for sg in sample_groups:
if sg.startswith("original"):
tmp = re.search("/T(\d+)_([CMG])", sg)
timepoints.append(tmp.group(1))
sample_type.append(possible_sample_types[tmp.group(2)])
else: # A "new" sample
tmp = re.search("/Time_(\d+)_.*/(Control|Model)", sg)
timepoints.append(tmp.group(1))
sample_type.append(tmp.group(2))
df = pd.DataFrame(
{
"timepoint": timepoints,
"sample_type": sample_type, # Control/Model
"sample_group": sample_groups, #
"filename": filenames, # FCH*_[1|2].fq.gz
}
)
df = df.sort_values(by=["timepoint", "sample_type", "sample_group"])
df = (
df.groupby(["timepoint", "sample_type", "sample_group"])["filename"]
.agg(",".join)
.reset_index()
)
return df
def ConcatSamples(samples: List[str]):
""" Concatenate samples in the same group to two fq.gz files.
    Files are concatenated because they are the same sample split across
    multiple lanes. The two merged files hold the forward and reverse reads.
    Args:
        samples (List[str]): absolute file paths with the trailing
            _[1|2].fq.gz stripped
Returns:
the `sample_group` of the concatenated files.
"""
f_out = f"{os.path.dirname(samples[0])}/"
sample_group = re.sub(DATA_DIR, "", f_out).strip("/")
if not os.path.exists(f"{f_out}merged_1.fq.gz"):
f_of_sample = " ".join([f"{x}_1.fq.gz" for x in samples])
cmd1 = f"cat {f_of_sample} > {f_out}merged_1.fq.gz"
subprocess.check_call(cmd1, shell=True)
logger.info(f"Concatenated fastq files for {sample_group}_1")
if not os.path.exists(f"{f_out}merged_2.fq.gz"):
f_of_sample = " ".join([f"{x}_2.fq.gz" for x in samples])
cmd2 = f"cat {f_of_sample} > {f_out}merged_2.fq.gz"
subprocess.check_call(cmd2, shell=True)
logger.info(f"Concatenated fastq files for {sample_group}_2")
return sample_group
if __name__ == "__main__":
logger.info("\x1b[31;1m" + "/*** GDC RNA-Seq pipeline started! ***/" + "\x1b[0m")
###################################################################
# Get design matrix #
###################################################################
logger.info("\x1b[33;21m" + "Step 1: get design matrix" + "\x1b[0m")
if os.path.exists(f"{DATA_DIR}/design_matrix.csv"):
design_mat = pd.read_csv(f"{DATA_DIR}/design_matrix.csv")
logger.info("Read design matrix from file")
else:
# Get sample paths for original and new files
original_samples = ListFastqFiles(group="original")
new_samples = ListFastqFiles(group="new")
all_samples = original_samples + new_samples
# Make design matrix of raw data files
design_mat = Samples2DesignMatrix(all_samples)
design_mat.to_csv(f"{DATA_DIR}/design_matrix.csv", index=False)
logger.info("Created design matrix")
###################################################################
# Concatenate multi-lane samples #
###################################################################
logger.info("\x1b[33;21m" + "Step 2: concatenate multi-lane samples" + "\x1b[0m")
sm_multi_lanes = design_mat[design_mat["filename"].str.contains(",")]
filenames = sm_multi_lanes.apply(
lambda x: [
f"{DATA_DIR}/{x['sample_group']}/{ele}" for ele in x["filename"].split(",")
],
axis=1,
).tolist()
for f in filenames:
sg = ConcatSamples(f)
design_mat.loc[design_mat["sample_group"] == sg, "filename"] = "merged"
assert not any(design_mat.filename.str.contains(","))
###################################################################
# Run fastp on all fq files #
###################################################################
logger.info("\x1b[33;21m" + "Step 3: QC and preprocess with fastp" + "\x1b[0m")
filenames = (
DATA_DIR + "/" + design_mat["sample_group"] + "/" + design_mat["filename"]
)
filenames = filenames.tolist()
sample_groups = [x.replace("/", "_") for x in design_mat["sample_group"]]
for i, (f, sg) in enumerate(zip(filenames, sample_groups)):
if os.path.exists(f"{RES_DIR}/fastp/{sg}_fastp.json"):
continue
logger.info(f"Running fastp on sample {sg}...")
subprocess.check_call(
f"""{FASTP_PATH} -V -i {f}_1.fq.gz -I {f}_2.fq.gz \
-o {RES_DIR}/data/{sg}_1.fq.gz \
-O {RES_DIR}/data/{sg}_2.fq.gz \
--html {RES_DIR}/fastp/{sg}_fastp.html \
--json {RES_DIR}/fastp/{sg}_fastp.json \
-w 4""",
shell=True,
)
logger.info(f"Generated fastp report for sample {sg}")
subprocess.check_call(
f"{MULTIQC_PATH} {RES_DIR}/fastp/ -m fastp -o {RES_DIR}/fastp/multiqc/",
shell=True,
)
if not os.path.exists(f"{RES_DIR}/fastqc/multiqc_report.html"):
subprocess.check_call(
f"{FASTQC_PATH} {RES_DIR}/data/* --noextract -o {RES_DIR}/fastqc/ -t 4"
)
subprocess.check_call(
f"{MULTIQC_PATH} {RES_DIR}/fastp/ -m fastqc -o {RES_DIR}/fastqc/",
shell=True,
)
###################################################################
# Align sequences and call counts #
###################################################################
logger.info("\x1b[33;21m" + "Step 4: STAR alignment" + "\x1b[0m")
# Build the STAR index if it's not already built
if not os.path.exists(f"{STAR_INDEX_DIR}/Genome"):
logger.info("STAR index not found. Building now...")
subprocess.check_call(
f"""STAR \
--runMode genomeGenerate \
--genomeDir {STAR_INDEX_DIR} \
--genomeFastaFiles {REFERENCE_GENOME_PATH} \
--sjdbOverhang 100 \
--sjdbGTFfile {GENCODE_PATH} \
--runThreadN 8 \
--outFileNamePrefix {WORK_DIR}/logs/star_index""",
shell=True,
)
logger.info(f"STAR index built to {STAR_INDEX_DIR}")
# Run STAR for each sample if output files are not found
sg_sms = design_mat["sample_group"].apply(os.path.basename)
for i, (sg, sm) in enumerate(zip(sample_groups, sg_sms)):
if os.path.exists(f"{RES_DIR}/counts/{sg}.tsv") and os.path.exists(
f"{RES_DIR}/bam/{sg}"
):
continue
os.makedirs(f"{RES_DIR}/bam/{sg}", exist_ok=True)
logger.info(f"Aligning sample {sg}")
subprocess.check_call(
f"""{STAR_PATH} \
--readFilesIn {RES_DIR}/data/{sg}_1.fq.gz {RES_DIR}/data/{sg}_2.fq.gz \
--outSAMattrRGline ID:{sg} SM:{sm} \
--alignIntronMax 1000000 \
--alignIntronMin 20 \
--alignMatesGapMax 1000000 \
--alignSJDBoverhangMin 1 \
--alignSJoverhangMin 8 \
--alignSoftClipAtReferenceEnds Yes \
--chimJunctionOverhangMin 15 \
--chimMainSegmentMultNmax 1 \
--chimOutType Junctions SeparateSAMold WithinBAM SoftClip \
--chimSegmentMin 15 \
--genomeDir {STAR_INDEX_DIR} \
--genomeLoad NoSharedMemory \
--limitSjdbInsertNsj 1200000 \
--outFileNamePrefix {RES_DIR}/bam/{sg}/{sg} \
--outFilterIntronMotifs None \
--outFilterMatchNminOverLread 0.33 \
--outFilterMismatchNmax 999 \
--outFilterMismatchNoverLmax 0.1 \
--outFilterMultimapNmax 20 \
--outFilterScoreMinOverLread 0.33 \
--outFilterType BySJout \
--outSAMattributes NH HI AS nM NM ch \
--outSAMstrandField intronMotif \
--outSAMtype BAM Unsorted \
--outSAMunmapped Within \
--quantMode TranscriptomeSAM GeneCounts \
--readFilesCommand zcat \
--runThreadN 8 \
--twopassMode Basic""",
shell=True,
)
shutil.move(
f"{RES_DIR}/bam/{sg}/{sg}ReadsPerGene.out.tab", f"{RES_DIR}/counts/{sg}.tsv"
)
logger.info(f"Counts for sample {sg} generated")
###################################################################
# Get TPM using Salmon #
###################################################################
logger.info("\x1b[33;21m" + "Step 5: Get TPM values using Salmon" + "\x1b[0m")
for sg in sample_groups:
if os.path.exists(f"{RES_DIR}/tpm/{sg}"):
continue
logger.info(f"Calling TPM values for sample {sg}")
subprocess.check_call(
f"""{SALMON_PATH} quant \
-t {REFERENCE_TRANSCRIPTOME_PATH} \
-l A \
-a {RES_DIR}/bam/{sg}/{sg}Aligned.toTranscriptome.out.bam \
-o {RES_DIR}/tpm/{sg} \
--gencode""",
shell=True,
)
###################################################################
# Combine counts and TPM tables #
###################################################################
logger.info("\x1b[33;21m" + "Step 6: Combine counts and TPM tables" + "\x1b[0m")
counts_table = []
tpm_table = []
for sg in sample_groups:
# Combine counts files into one table
        df = pd.read_table(f"{RES_DIR}/counts/{sg}.tsv", header=None)
import pandas as pd
from datetime import datetime
import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import Normalizer,MinMaxScaler
from imblearn.over_sampling import SMOTE
from sklearn.utils import shuffle
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
import numpy as np
import seaborn as sns
import plotly.graph_objs as go
import plotly.plotly as py
import plotly
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
#get_ipython().run_line_magic('matplotlib', 'inline')
#fd-future data set
#validating-0 or 1 (0=testing, 1=future prediction)
def flood_classifier(filename,fd,validating=0):
data1=pd.read_excel('data/'+filename+'.xlsx')
# In[4]:
data1.shape
# In[5]:
    #Filling null entries with the mean of their respective columns
for i in range(1,len(data1.columns)):
data1[data1.columns[i]] = data1[data1.columns[i]].fillna(data1[data1.columns[i]].mean())
# In[6]:
data1.describe()
# In[7]:
y=data1['Flood']
# In[8]:
for i in range(len(y)):
if(y[i] >= 0.1):
y[i]=1
# In[9]:
y=pd.DataFrame(y)
data1.drop('Flood',axis=1,inplace=True)
# In[10]:
data1.head()
# In[11]:
data1.hist(figsize=(6,6));
#Breaking Date column into timestamp
    d1 = pd.DataFrame()
import numpy as np
import matplotlib.pyplot as pls
import pandas as pd
import warnings
from IPython.display import display, HTML
import seaborn as sns
import lightgbm as lgb
from lightgbm import LGBMClassifier,LGBMRegressor
import shap
from .eda_anova import anova,two_way_anova,turkeyHSD
warnings.filterwarnings("ignore")
#=====================#=====================#=====================
# single dataset eda
#=====================#=====================#=====================
#single dataset report
def report(df,target=None,ignore=[],nbrmax=20):
do_eda(df,target,ignore,nbrmax)
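# Usage sketch (assumes a pandas DataFrame `df` with a target column named "label"):
#   report(df, target="label", ignore=["id"], nbrmax=20)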
#=====================#=====================#=====================#
# shap
#=====================#=====================#=====================#
#shap values
def plot_shaps(x, target,ignore=[],nbrmax=None,dependency=True):
features=x.columns.to_list()
features.remove(target)
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
#doesn't work on time columns, remove id columns (all values are different), columns with all nulls
for f in x.columns.to_list():
if (isTime(x[f].dtype) or x[f].isnull().values.all() or (len(x[f].unique())>x.shape[0]/2.0 and str(x[f].dtype) not in numerics)) and f in features:
features.remove(f)
features=list(set(features)-set(ignore))
    [print('Feature name {} contains special JSON characters - Skip'.format(x)) for x in features if ':' in x ]
features=[ x for x in features if not ':' in x ]
#list of categorical features
categorical_features=x[features].select_dtypes(exclude=numerics).columns.to_list()
#change type to categorical for lightgbm
backup={}
for c in categorical_features:
backup[c]=x[c].dtype
x[c] = x[c].astype('category')
target_type,target_cardinality,_=get_feature_info(x,target)
binary_target=(target_type=='Numeric' and target_cardinality==2)
if nbrmax==None:
if len(features)>20:
print('Shap values for 20 most important features will be plotted. If you need more please set nbrmax parameter')
nbrmax=20
if binary_target:
clf = LGBMClassifier(
objective='binary'
,n_estimators=100
, min_data_in_leaf = 10
, min_sum_hessian_in_leaf = 10
, feature_fraction = 0.9
, bagging_fraction = 1
, bagging_freq = 1
, metric='auc'
, learning_rate = 0.03
, num_leaves = 19
, num_threads = 2
, nrounds = 500
)
else:
clf = LGBMRegressor(
n_estimators=100
, min_data_in_leaf = 10
, min_sum_hessian_in_leaf = 10
, feature_fraction = 0.9
, bagging_fraction = 1
, bagging_freq = 1
, learning_rate = 0.03
, num_leaves = 19
, num_threads = 2
, nrounds = 500
)
clf.fit(x[features], x[target])#,categorical_feature=categorical_features)
shap_values = shap.TreeExplainer(clf.booster_).shap_values(x[features])
shap.summary_plot(shap_values, x[features], max_display=nbrmax, auto_size_plot=True)
if binary_target:
vals= np.abs(shap_values).mean(0)
else:
vals= shap_values
feature_importance = pd.DataFrame(list(zip(x[features].columns, sum(vals))), columns=['col_name','feature_importance_vals'])
feature_importance.sort_values(by=['feature_importance_vals'], ascending=False,inplace=True)
sorted_features=feature_importance['col_name'].to_list()
X=x.copy()
if binary_target:
shap.summary_plot(shap_values[1], x[features])
if dependency:
for f in categorical_features:
X[f]= X[f].astype(object)
X[f]=pd.factorize(X[f])[0]
for name in sorted_features[:nbrmax]:
#continue
if name in categorical_features and x[name].astype(str).nunique()>100:
continue
fig, ax = pls.subplots(1,1,figsize=(20,10))
shap.dependence_plot(name, shap_values[1], X[features], display_features=x[features], interaction_index=None,ax=ax)
pls.show()
#restore type
for c in categorical_features:
x[c] = x[c].astype(backup[c])
return sorted_features
#=====================#=====================#=====================#=====================
# numerical continues
#=====================#=====================#=====================#=====================
def plot_cuts(df,feature,target,bins=None, figsize=(12,6)):
if bins==None:
bins=np.arange(df[feature].min(),df[feature].max(),(df[feature].max()-df[feature].min())/10.)
fig, (ax1, ax2) = pls.subplots(ncols=2, figsize=figsize)
pls.title('Histogram of {}'.format(feature));
ax1.set_xlabel(feature)
ax1.set_ylabel('count')
ax2.set_xlabel(feature)
ax2.set_ylabel(target)
df.groupby(pd.cut(df[feature], bins=bins))[target].count().plot(kind='bar',ax=ax1,grid=True)
df.groupby(pd.cut(df[feature], bins=bins))[target].mean().plot(kind='bar',ax=ax2,grid=True)
pls.show()
def plot_qcuts(df,feature,target,q=None, figsize=(8,4)):
if q==None:
q = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,0.9, 1]
fig, (ax1, ax2) = pls.subplots(ncols=2, figsize=figsize)
pls.title('Histogram of {}'.format(feature));
ax1.set_xlabel(feature)
ax1.set_ylabel('count')
ax2.set_xlabel(feature)
ax2.set_ylabel(target)
df.groupby(pd.qcut(df[feature], q=q,duplicates='drop'))[target].count().plot(kind='bar',ax=ax1,grid=True)
df.groupby(pd.qcut(df[feature], q=q,duplicates='drop'))[target].mean( ).plot(kind='bar',ax=ax2,grid=True)
pls.show()
#=====================#=====================#=====================#=====================
# categorical
#=====================#=====================#=====================#=====================
def plot_stats(df,feature,target,max_nbr=20,sort='Count ',ax1=None,ax2=None):
end=max_nbr
createfig=(ax1==None or ax2==None)
cat_count = df[feature].value_counts().reset_index()
cat_count.columns = [feature,'Count ']
cat_count.sort_values(by=sort, ascending=False, inplace=True)
cat_perc = df[[feature, target]].groupby([feature],as_index=False).mean()
    cat_perc = pd.merge(cat_perc, cat_count, on=feature)
import logging
import os
import time
import warnings
from datetime import date, datetime, timedelta
from io import StringIO
from typing import Dict, Iterable, List, Optional, Union
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
import tables
from pvoutput.consts import (
BASE_URL,
CONFIG_FILENAME,
ONE_DAY,
PV_OUTPUT_DATE_FORMAT,
RATE_LIMIT_PARAMS_TO_API_HEADERS,
)
from pvoutput.daterange import DateRange, merge_date_ranges_to_years
from pvoutput.exceptions import NoStatusFound, RateLimitExceeded
from pvoutput.utils import (
_get_param_from_config_file,
_get_response,
_print_and_log,
get_date_ranges_to_download,
sort_and_de_dupe_pv_system,
system_id_to_hdf_key,
)
_LOG = logging.getLogger("pvoutput")
class PVOutput:
"""
Attributes:
api_key
system_id
rate_limit_remaining
rate_limit_total
rate_limit_reset_time
data_service_url
"""
def __init__(
self,
api_key: str = None,
system_id: str = None,
config_filename: Optional[str] = CONFIG_FILENAME,
data_service_url: Optional[str] = None,
):
"""
Args:
api_key: Your API key from PVOutput.org.
system_id: Your system ID from PVOutput.org. If you don't have a
PV system then you can register with PVOutput.org and select
the 'energy consumption only' box.
config_filename: Optional, the filename of the .yml config file.
data_service_url: Optional. If you have subscribed to
PVOutput.org's data service then add the data service URL here.
This string must end in '.org'.
"""
self.api_key = api_key
self.system_id = system_id
self.rate_limit_remaining = None
self.rate_limit_total = None
self.rate_limit_reset_time = None
self.data_service_url = data_service_url
# Set from config file if None
for param_name in ["api_key", "system_id"]:
if getattr(self, param_name) is None:
try:
param_value_from_config = _get_param_from_config_file(
param_name, config_filename
)
except Exception as e:
msg = (
"Error loading configuration parameter {param_name}"
" from config file {filename}. Either pass"
" {param_name} into PVOutput constructor, or create"
" config file {filename}. {exception}".format(
param_name=param_name, filename=CONFIG_FILENAME, exception=e
)
)
print(msg)
_LOG.exception(msg)
raise
setattr(self, param_name, param_value_from_config)
# Convert to strings
setattr(self, param_name, str(getattr(self, param_name)))
# Check for data_service_url
if self.data_service_url is None:
try:
self.data_service_url = _get_param_from_config_file(
"data_service_url", config_filename
)
except KeyError:
pass
except FileNotFoundError:
pass
if self.data_service_url is not None:
if not self.data_service_url.strip("/").endswith(".org"):
raise ValueError("data_service_url must end in '.org'")
def search(
self,
query: str,
lat: Optional[float] = None,
lon: Optional[float] = None,
include_country: bool = True,
**kwargs
) -> pd.DataFrame:
"""Search for PV systems.
Some quirks of the PVOutput.org API:
- The maximum number of results returned by PVOutput.org is 30.
If the number of returned results is 30, then there is no
indication of whether there are exactly 30 search results,
or if there are more than 30. Also, there is no way to
request additional 'pages' of search results.
- The maximum search radius is 25km
Args:
query: string, see https://pvoutput.org/help.html#search
e.g. '5km'.
lat: float, e.g. 52.0668589
lon: float, e.g. -1.3484038
include_country: bool, whether or not to include the country name
with the returned postcode.
Returns:
pd.DataFrame, one row per search results. Index is PV system ID.
Columns:
name,
system_DC_capacity_W,
address, # If `include_country` is True then address is
# '<country> <postcode>',
# else address is '<postcode>'.
orientation,
num_outputs,
last_output,
panel,
inverter,
distance_km,
latitude,
longitude
"""
api_params = {"q": query, "country": int(include_country)}
if lat is not None and lon is not None:
api_params["ll"] = "{:f},{:f}".format(lat, lon)
pv_systems_text = self._api_query(service="search", api_params=api_params, **kwargs)
pv_systems = pd.read_csv(
StringIO(pv_systems_text),
names=[
"name",
"system_DC_capacity_W",
"address",
"orientation",
"num_outputs",
"last_output",
"system_id",
"panel",
"inverter",
"distance_km",
"latitude",
"longitude",
],
index_col="system_id",
)
return pv_systems
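# Illustrative usage (the API key, system ID and coordinates below are placeholders):
#   pv = PVOutput(api_key="YOUR_API_KEY", system_id="12345")
#   nearby = pv.search(query="25km", lat=52.0668589, lon=-1.3484038)
#   print(nearby[["name", "system_DC_capacity_W", "distance_km"]].head())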
def get_status(
self, pv_system_id: int, date: Union[str, datetime], historic: bool = True, **kwargs
) -> pd.DataFrame:
"""Get PV system status (e.g. power generation) for one day.
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Args:
pv_system_id: int
date: str in format YYYYMMDD; or datetime
(localtime of the PV system)
Returns:
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
energy_efficiency_kWh_per_kW,
instantaneous_power_gen_W,
average_power_gen_W,
power_gen_normalised,
energy_consumption_Wh,
power_demand_W,
temperature_C,
voltage
"""
_LOG.info("system_id %d: Requesting system status for %s", pv_system_id, date)
date = date_to_pvoutput_str(date)
_check_date(date)
api_params = {
"d": date, # date, YYYYMMDD, localtime of the PV system
"h": int(historic == True), # We want historical data.
"limit": 288, # API limit is 288 (num of 5-min periods per day).
"ext": 0, # Extended data; we don't want extended data.
"sid1": pv_system_id, # SystemID.
}
try:
pv_system_status_text = self._api_query(
service="getstatus", api_params=api_params, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date %s", pv_system_id, date)
pv_system_status_text = ""
# See https://pvoutput.org/help.html#api-getstatus but make sure
# you read the 'History Query' subsection, as a historical query
# has slightly different return columns compared to a non-historical
# query!
columns = (
[
"cumulative_energy_gen_Wh",
"energy_efficiency_kWh_per_kW",
"instantaneous_power_gen_W",
"average_power_gen_W",
"power_gen_normalised",
"energy_consumption_Wh",
"power_demand_W",
"temperature_C",
"voltage",
]
if historic
else [
"cumulative_energy_gen_Wh",
"instantaneous_power_gen_W",
"energy_consumption_Wh",
"power_demand_W",
"power_gen_normalised",
"temperature_C",
"voltage",
]
)
pv_system_status = pd.read_csv(
StringIO(pv_system_status_text),
lineterminator=";",
names=["date", "time"] + columns,
parse_dates={"datetime": ["date", "time"]},
index_col=["datetime"],
dtype={col: np.float64 for col in columns},
).sort_index()
return pv_system_status
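# Illustrative usage (system ID and date are placeholders):
#   status = pv.get_status(pv_system_id=12345, date="20190101")
#   status["instantaneous_power_gen_W"].plot()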
def get_batch_status(
self,
pv_system_id: int,
date_to: Optional[Union[str, datetime]] = None,
max_retries: Optional[int] = 1000,
**kwargs
) -> Union[None, pd.DataFrame]:
"""Get batch PV system status (e.g. power generation).
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Data returned is limited to the last 366 days per request.
To retrieve older data, use the date_to parameter.
The PVOutput getbatchstatus API is asynchronous. When it's first
called, it replies to say 'accepted'. This function will then
wait a minute and call the API again to see if the data is ready.
Set `max_retries` to 1 if you want to return immediately, even
if data isn't ready yet (and hence this function will return None)
https://pvoutput.org/help.html#dataservice-getbatchstatus
Args:
pv_system_id: int
date_to: str in format YYYYMMDD; or datetime
(localtime of the PV system). The returned timeseries will
include 366 days of data: from YYYY-1MMDD to YYYYMMDD inclusive
max_retries: int, number of times to retry after receiving
a '202 Accepted' request. Set `max_retries` to 1 if you want
to return immediately, even if data isn't ready yet (and hence
this function will return None).
Returns:
None (if data isn't ready after retrying max_retries times) or
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
instantaneous_power_gen_W,
temperature_C,
voltage
"""
api_params = {"sid1": pv_system_id}
_set_date_param(date_to, api_params, "dt")
for retry in range(max_retries):
try:
pv_system_status_text = self._api_query(
service="getbatchstatus", api_params=api_params, use_data_service=True, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date_to %s", pv_system_id, date_to)
pv_system_status_text = ""
break
if "Accepted 202" in pv_system_status_text:
if retry == 0:
_print_and_log("Request accepted.")
if retry < max_retries - 1:
_print_and_log("Sleeping for 1 minute.")
time.sleep(60)
else:
_print_and_log(
"Call get_batch_status again in a minute to see if" " results are ready."
)
else:
break
else:
return
return _process_batch_status(pv_system_status_text)
def get_metadata(self, pv_system_id: int, **kwargs) -> pd.Series:
"""Get metadata for a single PV system.
Args:
pv_system_id: int
Returns:
pd.Series. Index is:
name,
system_DC_capacity_W,
address,
num_panels,
panel_capacity_W_each,
panel_brand,
num_inverters,
inverter_capacity_W,
inverter_brand,
orientation,
array_tilt_degrees,
shade,
install_date,
latitude,
longitude,
status_interval_minutes,
secondary_num_panels,
secondary_panel_capacity_W_each,
secondary_orientation,
secondary_array_tilt_degrees
"""
pv_metadata_text = self._api_query(
service="getsystem",
api_params={
"array2": 1, # Provide data about secondary array, if present.
"tariffs": 0,
"teams": 0,
"est": 0,
"donations": 0,
"sid1": pv_system_id, # SystemID
"ext": 0, # Include extended data?
},
**kwargs
)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
lineterminator=";",
names=[
"name",
"system_DC_capacity_W",
"address",
"num_panels",
"panel_capacity_W_each",
"panel_brand",
"num_inverters",
"inverter_capacity_W",
"inverter_brand",
"orientation",
"array_tilt_degrees",
"shade",
"install_date",
"latitude",
"longitude",
"status_interval_minutes",
"secondary_num_panels",
"secondary_panel_capacity_W_each",
"secondary_orientation",
"secondary_array_tilt_degrees",
],
parse_dates=["install_date"],
nrows=1,
).squeeze()
pv_metadata["system_id"] = pv_system_id
pv_metadata.name = pv_system_id
return pv_metadata
def get_statistic(
self,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.DataFrame:
"""Get summary stats for a single PV system.
Args:
pv_system_id: int
date_from
date_to
Returns:
pd.DataFrame:
total_energy_gen_Wh,
energy_exported_Wh,
average_daily_energy_gen_Wh,
minimum_daily_energy_gen_Wh,
maximum_daily_energy_gen_Wh,
average_efficiency_kWh_per_kW,
num_outputs, # The number of days for which there's >= 1 val.
actual_date_from,
actual_date_to,
record_efficiency_kWh_per_kW,
record_efficiency_date,
query_date_from,
query_date_to
"""
if date_from and not date_to:
date_to = pd.Timestamp.now().date()
if date_to and not date_from:
date_from = pd.Timestamp("1900-01-01").date()
api_params = {
"c": 0, # consumption and import
"crdr": 0, # credits / debits
"sid1": pv_system_id, # SystemID
}
_set_date_param(date_from, api_params, "df")
_set_date_param(date_to, api_params, "dt")
try:
pv_metadata_text = self._api_query(
service="getstatistic", api_params=api_params, **kwargs
)
except NoStatusFound:
pv_metadata_text = ""
columns = [
"total_energy_gen_Wh",
"energy_exported_Wh",
"average_daily_energy_gen_Wh",
"minimum_daily_energy_gen_Wh",
"maximum_daily_energy_gen_Wh",
"average_efficiency_kWh_per_kW",
"num_outputs",
"actual_date_from",
"actual_date_to",
"record_efficiency_kWh_per_kW",
"record_efficiency_date",
]
date_cols = ["actual_date_from", "actual_date_to", "record_efficiency_date"]
numeric_cols = set(columns) - set(date_cols)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
names=columns,
dtype={col: np.float32 for col in numeric_cols},
parse_dates=date_cols,
)
if pv_metadata.empty:
data = {col: np.float32(np.NaN) for col in numeric_cols}
data.update({col: pd.NaT for col in date_cols})
pv_metadata = pd.DataFrame(data, index=[pv_system_id])
else:
pv_metadata.index = [pv_system_id]
pv_metadata["query_date_from"] = pd.Timestamp(date_from) if date_from else pd.NaT
pv_metadata["query_date_to"] = pd.Timestamp(date_to) if date_to else pd.Timestamp.now()
return pv_metadata
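# Illustrative usage (system ID and dates are placeholders):
#   stats = pv.get_statistic(pv_system_id=12345, date_from="20190101", date_to="20191231")
#   print(stats[["total_energy_gen_Wh", "num_outputs"]])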
def _get_statistic_with_cache(
self,
store_filename: str,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.Series:
"""Will try to get stats from store_filename['statistics']. If this
fails, or if date_to > query_date_to, or if
date_from < query_date_from, then will call the API. Note that the aim
of this function is just to find the relevant actual_date_from and
actual_date_to, so this function does not respect the other params.
"""
if date_from:
date_from = pd.Timestamp(date_from).date()
if date_to:
date_to = pd.Timestamp(date_to).date()
def _get_fresh_statistic():
_LOG.info("pv_system %d: Getting fresh statistic.", pv_system_id)
stats = self.get_statistic(pv_system_id, **kwargs)
with pd.HDFStore(store_filename, mode="a") as store:
try:
store.remove(key="statistics", where="index=pv_system_id")
except KeyError:
pass
store.append(key="statistics", value=stats)
return stats
try:
stats = pd.read_hdf(store_filename, key="statistics", where="index=pv_system_id")
except (FileNotFoundError, KeyError):
return _get_fresh_statistic()
if stats.empty:
return _get_fresh_statistic()
query_date_from = stats.iloc[0]["query_date_from"]
query_date_to = stats.iloc[0]["query_date_to"]
if (
not pd.isnull(date_from)
and not pd.isnull(query_date_from)
and date_from < query_date_from.date()
):
return _get_fresh_statistic()
if not pd.isnull(date_to) and date_to > query_date_to.date():
return _get_fresh_statistic()
return stats
def download_multiple_systems_to_disk(
self,
system_ids: Iterable[int],
start_date: datetime,
end_date: datetime,
output_filename: str,
timezone: Optional[str] = None,
min_data_availability: Optional[float] = 0.5,
use_get_batch_status_if_available: Optional[bool] = True,
):
"""Download multiple PV system IDs to disk.
Data is saved to `output_filename` in HDF5 format. The exact data
format is documented in
https://github.com/openclimatefix/pvoutput/blob/master/docs/dataset.md
This function is designed to be run for days (!) downloading
gigabytes of PV data :) As such, this function can be safely
interrupted and re-started. All the state required to re-start
is stored in the HDF5 file.
Add appropriate handlers the Python logger `pvoutput` to see progress.
Args:
system_ids: List of PV system IDs to download.
start_date: Start of date range to download.
end_date: End of date range to download.
output_filename: HDF5 filename to write data to.
timezone: String representation of timezone of timeseries data.
e.g. 'Europe/London'.
min_data_availability: A float in the range [0, 1]. 1 means only
accept PV systems which have no days of missing data. 0 means
accept all PV systems, no matter if they have missing data.
Note that the data availability is measured against the date
range for which the PV system has data available, not from
the date range passed into this function.
use_get_batch_status_if_available: Bool. If true then will use
PVOutput's getbatchstatus API (which must be paid for, and
`data_service_url` must be set in `~/.pvoutput.yml` or when
initialising the PVOutput object).
"""
n = len(system_ids)
for i, pv_system_id in enumerate(system_ids):
_LOG.info("**********************")
msg = "system_id {:d}: {:d} of {:d} ({:%})".format(pv_system_id, i + 1, n, (i + 1) / n)
_LOG.info(msg)
print("\r", msg, end="", flush=True)
# Sorted list of DateRange objects. For each DateRange,
# we need to download from start_date to end_date inclusive.
date_ranges_to_download = get_date_ranges_to_download(
output_filename, pv_system_id, start_date, end_date
)
# How much data is actually available?
date_ranges_to_download = self._filter_date_range(
output_filename, pv_system_id, date_ranges_to_download, min_data_availability
)
if not date_ranges_to_download:
_LOG.info("system_id %d: No data left to download :)", pv_system_id)
continue
_LOG.info(
"system_id %d: Will download these date ranges: %s",
pv_system_id,
date_ranges_to_download,
)
if use_get_batch_status_if_available:
if self.data_service_url:
self._download_multiple_using_get_batch_status(
output_filename, pv_system_id, date_ranges_to_download, timezone
)
else:
raise ValueError("data_service_url is not set!")
else:
self._download_multiple_using_get_status(
output_filename, pv_system_id, date_ranges_to_download, timezone
)
def get_insolation_forecast(
self,
date: Union[str, datetime],
pv_system_id: Optional[int] = None,
timezone: Optional[str] = None,
lat: Optional[float] = None,
lon: Optional[float] = None,
**kwargs
):
"""Get Insolation data for a given site, or a given location defined by
longitude and latitude. This is the estimated output for the site
based on ideal weather conditions. It also factors in site age (reducing
output by 1% per year), shade and orientation. Donation mode must be enabled.
See https://pvoutput.org/help.html#api-getinsolation
Args:
date: str in format YYYYMMDD; or datetime
(localtime of the PV system)
pv_system_id: int
timezone: str
lat: float e.g. -27.4676
lon: float e.g. 153.0279
**kwargs:
Returns:
"""
date = date_to_pvoutput_str(date)
_check_date(date, prediction=True)
api_params = {
"d": date, # date, YYYYMMDD, localtime of the PV system
"sid1": pv_system_id, # SystemID.
"tz": timezone, # defaults to configured timezone of system otherwise GMT
}
if lat is not None and lon is not None:
api_params["ll"] = "{:f},{:f}".format(lat, lon)
try:
pv_insolation_text = self._api_query(
service="getinsolation", api_params=api_params, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date %s", pv_system_id, date)
pv_insolation_text = ""
columns = ["predicted_power_gen_W", "predicted_cumulative_energy_gen_Wh"]
pv_insolation = pd.read_csv(
StringIO(pv_insolation_text),
lineterminator=";",
names=["time"] + columns,
dtype={col: np.float64 for col in columns},
).sort_index()
pv_insolation.index = pd.to_datetime(
date + " " + pv_insolation.time, format="%Y-%m-%d %H:%M"
)
pv_insolation.drop("time", axis=1, inplace=True)
return pv_insolation
def _filter_date_range(
self,
store_filename: str,
system_id: int,
date_ranges: Iterable[DateRange],
min_data_availability: Optional[float] = 0.5,
) -> List[DateRange]:
"""Check getstatistic to see if system_id has data for all date ranges.
Args:
system_id: PV system ID.
store_filename: HDF5 filename to cache statistics to / from.
date_ranges: List of DateRange objects.
min_data_availability: A float in the range [0, 1]. 1 means only
accept PV systems which have no days of missing data. 0 means
accept all PV systems, no matter if they have missing data.
"""
if not date_ranges:
return date_ranges
stats = self._get_statistic_with_cache(
store_filename,
system_id,
date_to=date_ranges[-1].end_date,
wait_if_rate_limit_exceeded=True,
).squeeze()
if pd.isnull(stats["actual_date_from"]) or pd.isnull(stats["actual_date_to"]):
_LOG.info("system_id %d: Stats say there is no data!", system_id)
return []
timeseries_date_range = DateRange(stats["actual_date_from"], stats["actual_date_to"])
data_availability = stats["num_outputs"] / (timeseries_date_range.total_days() + 1)
if data_availability < min_data_availability:
_LOG.info(
"system_id %d: Data availability too low! Only %.0f %%.",
system_id,
data_availability * 100,
)
return []
new_date_ranges = []
for date_range in date_ranges:
new_date_range = date_range.intersection(timeseries_date_range)
if new_date_range:
new_date_ranges.append(new_date_range)
return new_date_ranges
def _download_multiple_using_get_batch_status(
self, output_filename, pv_system_id, date_ranges_to_download, timezone: Optional[str] = None
):
years = merge_date_ranges_to_years(date_ranges_to_download)
dates_to = [year.end_date for year in years]
total_rows = self._download_multiple_worker(
output_filename, pv_system_id, dates_to, timezone, use_get_status=False
)
# Re-load data, sort, remove duplicate indices, append back
if total_rows:
with pd.HDFStore(output_filename, mode="a", complevel=9) as store:
sort_and_de_dupe_pv_system(store, pv_system_id)
def _download_multiple_using_get_status(
self, output_filename, pv_system_id, date_ranges_to_download, timezone: Optional[str] = None
):
for date_range in date_ranges_to_download:
dates = date_range.date_range()
self._download_multiple_worker(
output_filename, pv_system_id, dates, timezone, use_get_status=True
)
def _download_multiple_worker(
self, output_filename, pv_system_id, dates, timezone, use_get_status
) -> int:
"""
Returns:
total number of rows downloaded
"""
total_rows = 0
for date_to_load in dates:
_LOG.info("system_id %d: Requesting date: %s", pv_system_id, date_to_load)
datetime_of_api_request = pd.Timestamp.utcnow()
if use_get_status:
timeseries = self.get_status(
pv_system_id, date_to_load, wait_if_rate_limit_exceeded=True
)
else:
timeseries = self.get_batch_status(pv_system_id, date_to=date_to_load)
if timeseries.empty:
_LOG.info(
"system_id %d: Got empty timeseries back for %s", pv_system_id, date_to_load
)
if use_get_status:
_append_missing_date_range(
output_filename,
pv_system_id,
date_to_load,
date_to_load,
datetime_of_api_request,
)
else:
_append_missing_date_range(
output_filename,
pv_system_id,
date_to_load - timedelta(days=365),
date_to_load,
datetime_of_api_request,
)
else:
total_rows += len(timeseries)
timeseries = timeseries.tz_localize(timezone)
_LOG.info(
"system_id: %d: %d rows retrieved: %s to %s",
pv_system_id,
len(timeseries),
timeseries.index[0],
timeseries.index[-1],
)
if use_get_status:
check_pv_system_status(timeseries, date_to_load)
else:
_record_gaps(
output_filename,
pv_system_id,
date_to_load,
timeseries,
datetime_of_api_request,
)
timeseries["datetime_of_API_request"] = datetime_of_api_request
timeseries["query_date"] = pd.Timestamp(date_to_load)
key = system_id_to_hdf_key(pv_system_id)
with pd.HDFStore(output_filename, mode="a", complevel=9) as store:
    store.append(key=key, value=timeseries)  # assumed: persist this day's timeseries under the system's HDF key
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 18:20:48 2021
@author: bosulus
Ensemble of networks for improved accuracy in deep learning
"""
import os
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import glob
import cv2
import seaborn as sns
import matplotlib.image as mpimg
from keras.models import Model, Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.utils import normalize, to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import VGG16 as VGG
from keras.applications.densenet import DenseNet201 as DenseNet
from keras.optimizers import SGD
from keras.layers import GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
plt.style.use('classic')
# Read input images and assign labels based on folder names
print(os.listdir("EAC_Dataset/dataset50%"))
SIZE = 224 #Resize images
#Capture training data and labels into respective lists
train_images = []
train_labels = []
for directory_path in glob.glob("EAC_Dataset/dataset50%/train/*"):
label = directory_path.split("\\")[-1]
print(label)
for img_path in glob.glob(os.path.join(directory_path, "*.tif")):
print(img_path)
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.resize(img, (SIZE, SIZE))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
train_images.append(img)
train_labels.append(label)
#Convert lists to arrays
train_images = np.array(train_images)
train_labels = np.array(train_labels)
# Capture test/validation data and labels into respective lists
test_images = []
test_labels = []
for directory_path in glob.glob("EAC_Dataset/dataset50%/validation/*"):
fruit_label = directory_path.split("\\")[-1]
for img_path in glob.glob(os.path.join(directory_path, "*.tif")):
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.resize(img, (SIZE, SIZE))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
test_images.append(img)
test_labels.append(fruit_label)
#Convert lists to arrays
test_images = np.array(test_images)
test_labels = np.array(test_labels)
#Encode labels from text to integers.
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(test_labels)
test_labels_encoded = le.transform(test_labels)
le.fit(train_labels)
train_labels_encoded = le.transform(train_labels)
#Split data into test and train datasets (already split but assigning to meaningful convention)
x_train, y_train, x_test, y_test = train_images, train_labels_encoded, test_images, test_labels_encoded
###################################################################
# Normalize pixel values to between 0 and 1
x_train, x_test = x_train / 255.0, x_test / 255.0
#One hot encode y values for neural network.
from keras.utils import to_categorical
y_train_one_hot = to_categorical(y_train)
y_test_one_hot = to_categorical(y_test)
#########################################################
#Model 1
model1 = Sequential()
model1.add(Conv2D(32, (3, 3), input_shape = (SIZE,SIZE,3), activation='relu'))
model1.add(MaxPooling2D(pool_size = (2, 2)))
model1.add(Dropout(0.2))
model1.add(Conv2D(64, (3, 3), activation='relu'))
model1.add(MaxPooling2D(pool_size = (2, 2)))
model1.add(Dropout(0.2))
model1.add(Conv2D(128, (3, 3), activation='relu'))
model1.add(MaxPooling2D(pool_size = (2, 2)))
model1.add(Dropout(0.2))
model1.add(Flatten())
model1.add(Dense(128, activation = 'relu'))
model1.add(Dense(9, activation = 'softmax'))
opt = SGD(lr=0.001, momentum=0.9)
model1.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model1.summary()
#Training the CNN model1
#history = model1.fit(x_train, y_train, batch_size = 128, epochs = 50, verbose = 1, validation_data = (x_test, y_test))
history1 = model1.fit(x_train, y_train_one_hot, epochs = 100, validation_data = (x_test, y_test_one_hot))
model1.save('EAC_Dataset/saved_models/model1/model1.hdf5')
scores= model1.evaluate(x_test, y_test_one_hot, verbose=1)
print('loss:',scores[0])
print('accuracy:', scores[1])
##########################################################
#Model2
model2 = Sequential()
model2.add(Conv2D(32, (3, 3), input_shape = (SIZE,SIZE,3), activation='relu'))
model2.add(Conv2D(32, (3, 3), activation='relu'))
model2.add(MaxPooling2D(pool_size = (2, 2)))
model2.add(Conv2D(64, (3, 3), activation='relu'))
model2.add(Conv2D(64, (3, 3), activation='relu'))
model2.add(Conv2D(64, (3, 3), activation='relu'))
model2.add(MaxPooling2D(pool_size = (2, 2)))
model2.add(Conv2D(128, (3, 3), activation='relu'))
model2.add(Conv2D(9, (1,1)))
model2.add(Flatten())
model2.add(Dense(9, activation = 'softmax'))
opt = SGD(lr=0.001, momentum=0.9)
model2.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model2.summary()
history2 = model2.fit(x_train, y_train_one_hot, epochs = 100, validation_data = (x_test, y_test_one_hot))
model2.save('EAC_Dataset/saved_models/model1/model2.hdf5')
scores= model2.evaluate(x_test, y_test_one_hot, verbose=1)
print('loss:',scores[0])
print('accuracy:', scores[1])
###################################################################
#Model 3
#
model3 = Sequential()
model3.add(Conv2D(32, (3, 3), input_shape = (SIZE,SIZE,3), activation='relu'))
model3.add(MaxPooling2D(pool_size = (2, 2)))
model3.add(Dropout(0.2))
model3.add(Conv2D(64, (3, 3), activation='relu'))
model3.add(MaxPooling2D(pool_size = (2, 2)))
model3.add(Dropout(0.2))
model3.add(Flatten())
model3.add(Dense(9, activation = 'softmax'))  # 9 classes, matching the one-hot labels and the other two models
# compile model
opt = SGD(lr=0.001, momentum=0.9)
model3.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model3.summary()
#Training the CNN model3
#history = model1.fit(X_train, y_train, batch_size = 128, epochs = 10, verbose = 1, validation_data = (X_test, y_test))
history3 = model3.fit(x_train, y_train_one_hot, epochs = 100, validation_data = (x_test, y_test_one_hot))
model3.save('EAC_Dataset/saved_models/model1/model3.hdf5')
scores= model3.evaluate(x_test, y_test_one_hot, verbose=1)
print('loss:',scores[0])
print('accuracy:', scores[1])
loss = history3.history['loss']
val_loss = history3.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
acc = history3.history['accuracy']
val_acc= history3.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
#########################################################################
### Model average / sum Ensemble
# Simple sum of all outputs / predictions and argmax across all classes
########
from keras.models import load_model
from sklearn.metrics import accuracy_score
model1 = load_model('EAC_Dataset/saved_models/model1/model1.hdf5')
model2 = load_model('EAC_Dataset/saved_models/model1/model2.hdf5')
model3 = load_model('EAC_Dataset/saved_models/model1/model3.hdf5')
models = [model1, model2, model3]
preds = [model.predict(x_test) for model in models]
preds=np.array(preds)
summed = np.sum(preds, axis=0)
# argmax across classes
ensemble_prediction = np.argmax(summed, axis=1)
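# Note: summing the per-class probabilities of the three models and taking the argmax is
# equivalent to an unweighted average ensemble (dividing by 3 does not change the argmax).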
prediction1 = model1.predict_classes(x_test)
prediction2 = model2.predict_classes(x_test)
prediction3 = model3.predict_classes(x_test)
accuracy1 = accuracy_score(y_test, prediction1)
accuracy2 = accuracy_score(y_test, prediction2)
accuracy3 = accuracy_score(y_test, prediction3)
ensemble_accuracy = accuracy_score(y_test, ensemble_prediction)
print('Accuracy Score for model1 = ', accuracy1)
print('Accuracy Score for model2 = ', accuracy2)
print('Accuracy Score for model3 = ', accuracy3)
print('Accuracy Score for average ensemble = ', ensemble_accuracy)
########################################
#Weighted average ensemble
models = [model1, model2, model3]
preds = [model.predict(x_test) for model in models]
preds=np.array(preds)
weights = [0.4, 0.2, 0.4]
#Use tensordot to sum the products of all elements over specified axes.
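# Shape note: preds has shape (n_models, n_samples, n_classes) and weights has length
# n_models, so tensordot over axis 0 collapses the model axis, giving an
# (n_samples, n_classes) array equal to 0.4*preds[0] + 0.2*preds[1] + 0.4*preds[2].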
weighted_preds = np.tensordot(preds, weights, axes=((0),(0)))
weighted_ensemble_prediction = np.argmax(weighted_preds, axis=1)
weighted_accuracy = accuracy_score(y_test, weighted_ensemble_prediction)
print('Accuracy Score for model1 = ', accuracy1)
print('Accuracy Score for model2 = ', accuracy2)
print('Accuracy Score for model3 = ', accuracy3)
print('Accuracy Score for average ensemble = ', ensemble_accuracy)
print('Accuracy Score for weighted average ensemble = ', weighted_accuracy)
########################################
#Grid search for the best combination of w1, w2, w3 that gives maximum acuracy
models = [model1, model2, model3]
preds1 = [model.predict(x_test) for model in models]
preds1=np.array(preds1)
df = pd.DataFrame([])
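# A minimal sketch of the weight grid search described above; the 0.1 step size and the
# result-table layout are illustrative choices, not the original author's code. It reuses
# preds1, y_test, accuracy_score and df defined just above.
best_acc, best_weights = 0.0, None
for w1 in np.arange(0.0, 1.01, 0.1):
    for w2 in np.arange(0.0, 1.01 - w1, 0.1):
        w3 = max(0.0, 1.0 - w1 - w2)
        wted_preds = np.tensordot(preds1, [w1, w2, w3], axes=((0), (0)))
        wted_labels = np.argmax(wted_preds, axis=1)
        acc = accuracy_score(y_test, wted_labels)
        row = pd.DataFrame({'w1': [w1], 'w2': [w2], 'w3': [w3], 'accuracy': [acc]})
        df = pd.concat([df, row], ignore_index=True)
        if acc > best_acc:
            best_acc, best_weights = acc, (round(w1, 2), round(w2, 2), round(w3, 2))
print('Best weighted ensemble accuracy = {:.4f} with weights {}'.format(best_acc, best_weights))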
#!/usr/bin/env python
# coding: utf-8
# # COVID-19: DATA SCIENCE AND MACHINE LEARNING VISUALIZATIONS
#
# I am currently using the [NYTimes](https://github.com/nytimes/covid-19-data) and [JHU CSSE](https://github.com/CSSEGISandData/COVID-19) databases in this notebook. NYTimes covers USA data and JHU CSSE covers international data. Conveniently, both are published as `.csv` files, which pandas can read directly.
#
# If you are planning to run the notebook on your own computer, please make sure you have all the dependencies installed!
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import geopandas
import pycountry
import plotly
import plotly.express as px
import plotly.figure_factory as ff
import time
from datetime import datetime
import json
import os
from urllib.request import urlopen
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from pathlib import Path
get_ipython().run_line_magic('matplotlib', 'inline')
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
plotly.io.orca.config.executable = '/path/to/orca'
plotly.io.orca.config.save()
# ## Pulling the Databases
# When cloning the repositories that contain the databases, I use `.gitignore` to avoid pushing their large files to this repository. I do not modify the directories that contain the databases; I leave them as they are.
# In[ ]:
#Please run this cell if databases directory is empty!
get_ipython().system('git clone https://github.com/CSSEGISandData/COVID-19 databases/jhucsse')
get_ipython().system('git clone https://github.com/nytimes/covid-19-data databases/nytimes')
# Run the cells below to get the most recent databases!
# In[ ]:
get_ipython().system('git -C databases/jhucsse pull origin master')
get_ipython().system('ls databases/jhucsse')
# In[ ]:
get_ipython().system('git -C databases/nytimes pull origin master')
get_ipython().system('ls databases/nytimes')
# ## Load `.csv` Files into Pandas DataFrames
# All the recent databases are pulled from GitHub
# In[ ]:
COVID19_confirmed = pd.read_csv("databases/jhucsse/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
COVID19_deaths = pd.read_csv("databases/jhucsse/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv")
COVID19_recovered = pd.read_csv("databases/jhucsse/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv")
#Global variables
databases = [COVID19_confirmed, COVID19_deaths, COVID19_recovered]
dataset = [(COVID19_confirmed, "confirmed"), (COVID19_deaths, "deaths"), (COVID19_recovered, "recovered")]
all_countries = list(COVID19_confirmed['Country/Region'])
all_countries = list(dict.fromkeys(all_countries))
dates = list(COVID19_confirmed.columns)
dates.remove('Country/Region')
dates.remove('Province/State')
dates.remove('Lat')
dates.remove('Long')
# In[ ]:
COVID19_US = pd.read_csv("databases/nytimes/us.csv") #Already provided with the JHU CSSE
COVID19_US_states = pd.read_csv("databases/nytimes/us-states.csv")
COVID19_US_counties = pd.read_csv("databases/nytimes/us-counties.csv", dtype={"fips": str})
import requests, re, json, csv
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
confirmed_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'
deaths_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv'
recovered_CSV_URL = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv'
confirmed_total_data = []
deaths_total_data = []
recovered_total_data = []
with requests.Session() as s:
download = s.get(confirmed_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
confirmed_total_data.append(row)
with requests.Session() as s:
download = s.get(deaths_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
deaths_total_data.append(row)
with requests.Session() as s:
download = s.get(recovered_CSV_URL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
my_list = list(cr)
for row in my_list:
recovered_total_data.append(row)
# confirmed_total_data[0]
confirmed_df = pd.DataFrame(confirmed_total_data[1:], columns=confirmed_total_data[0])
deaths_df = pd.DataFrame(deaths_total_data[1:], columns=deaths_total_data[0])  # skip the header row, as for confirmed_df
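# The recovered series is downloaded above but never framed; an analogous construction
# (assumed, mirroring the confirmed/deaths lines) would be:
recovered_df = pd.DataFrame(recovered_total_data[1:], columns=recovered_total_data[0])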
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""snake-on-pygame: A simple and fun snake game, playable by Human and AI.
This module is the implementation of the snake game on pygame, focusing on speed
and simplicity. It's playable by both humans and AI agents and it uses most of
pygame's optimizations to deliver a smooth experience in testing/playing.
Usage for human players
----------
To play as a human, you only need to run this file, given you have the
needed dependencies.
$ python snake.py
Usage for AI agents
----------
To use with AI agents, you need to integrate the game with the AI agent. An
example usage is:
>>> from snake-on-pygame import Game
>>> game = Game(player = "ROBOT",
board_size = board_size,
local_state = local_state,
relative_pos = RELATIVE_POS)
Useful properties:
>>> print(game.nb_actions)
5 # number of actions.
>>> print(game.food_pos)
(6, 5) # current position of food.
>>> print(game.steps)
10 # current number of steps in a given episode.
>>> print(game.snake.length)
4 # current length of the snake in a given episode.
Possible methods:
>>> state = game.reset()
Reset the game and returns the game state right after resetting.
>>> state = game.state()
Get the current game state.
>>> game.food_pos = game.generate_food()
Update the food position.
>>> state, reward, done, info = game.step(numerical_action)
Play a numerical_action, obtaining state, reward, over and info.
>>> game.render()
Render the game in a pygame window.
TO DO
----------
- Publish to pip.
"""
import sys # To close the window when the game is over
from array import array # Efficient numeric arrays
from os import environ, path # To center the game window the best possible
import random # Random numbers used for the food
import logging # Logging function for movements and errors
import json # For file handling (leaderboards)
from itertools import tee # For the color gradient on snake
import pygame # This is the engine used in the game
import numpy as np # Used in calculations and math
import pandas as pd # Used to manage the leaderboards data
from utilities.text_block import TextBlock, InputBox # Textblocks for pygame
__author__ = "<NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# Actions, options and forbidden moves
OPTIONS = {
"QUIT": 0,
"PLAY": 1,
"BENCHMARK": 2,
"LEADERBOARDS": 3,
"MENU": 4,
"ADD_TO_LEADERBOARDS": 5,
}
RELATIVE_ACTIONS = {"LEFT": 0, "FORWARD": 1, "RIGHT": 2}
ABSOLUTE_ACTIONS = {"LEFT": 0, "RIGHT": 1, "UP": 2, "DOWN": 3, "IDLE": 4}
FORBIDDEN_MOVES = [(0, 1), (1, 0), (2, 3), (3, 2)]
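# Each pair in FORBIDDEN_MOVES is an (action, previous_action) combination that would
# reverse the snake onto itself and is therefore ignored, e.g. (0, 1) means LEFT pressed
# right after RIGHT.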
# Possible rewards in the game
REWARDS = {"MOVE": -0.005, "GAME_OVER": -1, "SCORED": 1}
# Types of point in the board
POINT_TYPE = {"EMPTY": 0, "FOOD": 1, "BODY": 2, "HEAD": 3, "DANGEROUS": 4}
# Speed levels possible to human players. MEGA HARDCORE starts with MEDIUM and
# increases with snake size
LEVELS = [" EASY ", " MEDIUM ", " HARD ", " MEGA HARDCORE "]
SPEEDS = {"EASY": 80, "MEDIUM": 60, "HARD": 40, "MEGA_HARDCORE": 65}
# Set the constant FPS limit for the game. Smoothness depend on this.
GAME_FPS = 100
class GlobalVariables:
"""Global variables to be used while drawing and moving the snake game.
Attributes
----------
board_size: int, optional, default = 30
The size of the board.
block_size: int, optional, default = 20
The size in pixels of a block.
head_color: tuple of 3 * int, optional, default = (42, 42, 42)
Color of the head. Start of the body color gradient.
tail_color: tuple of 3 * int, optional, default = (152, 152, 152)
Color of the tail. End of the body color gradient.
food_color: tuple of 3 * int, optional, default = (200, 0, 0)
Color of the food.
game_speed: int, optional, default = 80
Wait time in milliseconds between snake moves; the lower, the faster the game.
benchmark: int, optional, default = 1
Amount of matches to benchmark and possibly go to leaderboards.
"""
def __init__(
self,
board_size=30,
block_size=20,
head_color=(42, 42, 42),
tail_color=(152, 152, 152),
food_color=(200, 0, 0),
game_speed=80,
benchmark=1,
):
"""Initialize all global variables. Updated with argument_handler."""
self.board_size = board_size
self.block_size = block_size
self.head_color = head_color
self.tail_color = tail_color
self.food_color = food_color
self.game_speed = game_speed
self.benchmark = benchmark
if self.board_size > 50: # Warn the user about performance
LOGGER.warning("WARNING: BOARD IS TOO BIG, IT MAY RUN SLOWER.")
@property
def canvas_size(self):
"""Canvas size is updated with board_size and block_size."""
return self.board_size * self.block_size
class Snake:
"""Player (snake) class which initializes head, body and board.
The body attribute represents a list of positions of the body, which are in-
cremented when moving/eating on the position [0]. The orientation represents
where the snake is looking at (head) and collisions happen when any element
is superposed with the head.
Attributes
----------
head: list of 2 * int, default = [board_size / 4, board_size / 4]
The head of the snake, located according to the board size.
body: list of lists of 2 * int
Starts with 3 parts and grows when food is eaten.
previous_action: int, default = 1
Last action which the snake took.
length: int, default = 3
Variable length of the snake, can increase when food is eaten.
"""
def __init__(self):
"""Inits Snake with 3 body parts (one is the head) and pointing right"""
self.head = [int(VAR.board_size / 4), int(VAR.board_size / 4)]
self.body = [
[self.head[0], self.head[1]],
[self.head[0] - 1, self.head[1]],
[self.head[0] - 2, self.head[1]],
]
self.previous_action = 1
self.length = 3
def is_movement_invalid(self, action):
"""Check if the movement is invalid, according to FORBIDDEN_MOVES."""
invalid = False
if (
action,
self.previous_action,
) in FORBIDDEN_MOVES or action == ABSOLUTE_ACTIONS["IDLE"]:
invalid = True
return invalid
def move(self, action, food_pos):
"""According to orientation, move 1 block. If the head is not positioned
on food, pop a body part. Else, return without popping.
Return
----------
ate_food: boolean
Flag which represents whether the snake ate or not food.
"""
ate_food = False
if self.is_movement_invalid(action):
action = self.previous_action
else:
self.previous_action = action
if action == ABSOLUTE_ACTIONS["LEFT"]:
self.head[0] -= 1
elif action == ABSOLUTE_ACTIONS["RIGHT"]:
self.head[0] += 1
elif action == ABSOLUTE_ACTIONS["UP"]:
self.head[1] -= 1
elif action == ABSOLUTE_ACTIONS["DOWN"]:
self.head[1] += 1
self.body.insert(0, list(self.head))
if self.head == food_pos:
LOGGER.info("EVENT: FOOD EATEN")
self.length = len(self.body)
ate_food = True
else:
self.body.pop()
return ate_food
class FoodGenerator:
"""Generate and keep track of food.
Attributes
----------
pos:
Current position of food.
is_food_on_screen:
Flag for existence of food.
"""
def __init__(self, body):
"""Initialize a food piece and set existence flag."""
self.is_food_on_screen = False
self.pos = self.generate_food(body)
def generate_food(self, body):
"""Generate food and verify if it's on a valid place.
Return
----------
pos: tuple of 2 * int
Position of the food that was generated. It can't be in the body.
"""
if not self.is_food_on_screen:
while True:
food = [
int((VAR.board_size - 1) * random.random()),
int((VAR.board_size - 1) * random.random()),
]
if food in body:
continue
else:
self.pos = food
break
LOGGER.info("EVENT: FOOD APPEARED")
self.is_food_on_screen = True
return self.pos
class Game:
"""Hold the game window and functions.
Attributes
----------
window: pygame display
Pygame window to show the game.
fps: pygame time clock
Define Clock and ticks in which the game will be displayed.
snake: object
The actual snake who is going to be played.
food_generator: object
Generator of food which responds to the snake.
food_pos: tuple of 2 * int
Position of the food on the board.
game_over: boolean
Flag for game_over.
player: string
Define if human or robots are playing the game.
board_size: int, optional, default = 30
The size of the board.
local_state: boolean, optional, default = False
Whether to use or not game expertise (used mostly by robots players).
relative_pos: boolean, optional, default = False
Whether to use or not relative position of the snake head. Instead of
actions, use relative_actions.
screen_rect: tuple of 2 * int
The screen rectangle, used to draw relatively positioned blocks.
"""
def __init__(
self, player="HUMAN", board_size=30, local_state=False, relative_pos=False
):
"""Initialize window, fps and score. Change nb_actions if relative_pos"""
VAR.board_size = board_size
self.local_state = local_state
self.relative_pos = relative_pos
self.player = player
if player == "ROBOT":
if self.relative_pos:
self.nb_actions = 3
else:
self.nb_actions = 5
self.action_space = self.nb_actions
self.observation_space = np.empty(shape=(board_size ** 2,))
self.reset()
self.font_path = self.resource_path("resources/fonts/product_sans_bold.ttf")
self.logo_path = self.resource_path("resources/images/ingame_snake_logo.png")
def reset(self):
"""Reset the game environment."""
self.steps = 0
self.snake = Snake()
self.food_generator = FoodGenerator(self.snake.body)
self.food_pos = self.food_generator.pos
self.scored = False
self.game_over = False
return self.state()
def create_window(self):
"""Create a pygame display with board_size * block_size dimension."""
pygame.init()
flags = pygame.DOUBLEBUF | pygame.HWSURFACE
self.window = pygame.display.set_mode((VAR.canvas_size, VAR.canvas_size), flags)
self.window.set_alpha(None)
self.screen_rect = self.window.get_rect()
self.fps = pygame.time.Clock()
def cycle_menu(
self,
menu_options,
list_menu,
dictionary,
img=None,
img_rect=None,
leaderboards=False,
):
"""Cycle through a given menu, waiting for an option to be clicked."""
selected = False
selected_option = None
while not selected:
pygame.event.pump()
events = pygame.event.get()
self.window.fill(pygame.Color(225, 225, 225))
for i, option in enumerate(menu_options):
if option is not None:
option.draw()
option.hovered = False
if (
option.rect.collidepoint(pygame.mouse.get_pos())
and option.block_type != "text"
):
option.hovered = True
for event in events:
if event.type == pygame.MOUSEBUTTONUP:
if leaderboards:
opt = list_menu[i]
if opt == "MENU":
return dictionary[opt], None
else:
pages = len(opt.rstrip("0123456789"))
page = int(opt[pages:])
selected_option = dictionary[opt[:pages]]
return selected_option, page
else:
selected_option = dictionary[list_menu[i]]
if selected_option is not None:
selected = True
if img is not None:
self.window.blit(img, img_rect.bottomleft)
pygame.display.update()
return selected_option
def cycle_matches(self, n_matches, mega_hardcore=False):
"""Cycle through matches until the end."""
score = array("i")
step = array("i")
for _ in range(n_matches):
self.reset()
self.start_match(wait=3)
current_score, current_step = self.single_player(mega_hardcore)
score.append(current_score)
step.append(current_step)
return score, step
def menu(self):
"""Main menu of the game.
Return
----------
selected_option: int
The selected option in the main loop.
"""
pygame.display.set_caption("snake-on-pygme | PLAY NOW!")
img = pygame.image.load(self.logo_path).convert()
img = pygame.transform.scale(img, (VAR.canvas_size, int(VAR.canvas_size / 3)))
img_rect = img.get_rect()
img_rect.center = self.screen_rect.center
list_menu = ["PLAY", "BENCHMARK", "LEADERBOARDS", "QUIT"]
menu_options = [
TextBlock(
text=" PLAY GAME ",
pos=(self.screen_rect.centerx, 4 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 12),
block_type="menu",
),
TextBlock(
text=" BENCHMARK ",
pos=(self.screen_rect.centerx, 6 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 12),
block_type="menu",
),
TextBlock(
text=" LEADERBOARDS ",
pos=(self.screen_rect.centerx, 8 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 12),
block_type="menu",
),
TextBlock(
text=" QUIT ",
pos=(self.screen_rect.centerx, 10 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 12),
block_type="menu",
),
]
selected_option = self.cycle_menu(
menu_options, list_menu, OPTIONS, img, img_rect
)
return selected_option
def start_match(self, wait):
"""Create some wait time before the actual drawing of the game."""
for i in range(wait):
self.window.fill(pygame.Color(225, 225, 225))
time = " {:d} ".format(wait - i)
# Game starts in 3, 2, 1
text = [
TextBlock(
text=" Game starts in ",
pos=(self.screen_rect.centerx, 4 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 12),
block_type="text",
),
TextBlock(
text=time,
pos=(self.screen_rect.centerx, 12 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 1.5),
block_type="text",
),
]
for text_block in text:
text_block.draw()
pygame.display.update()
pygame.display.set_caption(
"snake-on-pygame | Game starts in " + time + " second(s) ..."
)
pygame.time.wait(1000)
LOGGER.info("EVENT: GAME START")
def start(self):
"""Use menu to select the option/game mode."""
opt = self.menu()
while True:
page = 1
if opt == OPTIONS["QUIT"]:
pygame.quit()
sys.exit()
elif opt == OPTIONS["PLAY"]:
VAR.game_speed, mega_hardcore = self.select_speed()
score, _ = self.cycle_matches(n_matches=1, mega_hardcore=mega_hardcore)
opt = self.over(score, None)
elif opt == OPTIONS["BENCHMARK"]:
VAR.game_speed, mega_hardcore = self.select_speed()
score, steps = self.cycle_matches(
n_matches=VAR.benchmark, mega_hardcore=mega_hardcore
)
opt = self.over(score, steps)
elif opt == OPTIONS["LEADERBOARDS"]:
while page is not None:
opt, page = self.view_leaderboards(page)
elif opt == OPTIONS["MENU"]:
opt = self.menu()
if opt == OPTIONS["ADD_TO_LEADERBOARDS"]:
self.add_to_leaderboards(int(np.mean(score)), int(np.mean(steps)))
opt, page = self.view_leaderboards()
def over(self, score, step):
"""If collision with wall or body, end the game and open options.
Return
----------
selected_option: int
The selected option in the main loop.
"""
score_option = None
if len(score) == VAR.benchmark:
score_option = TextBlock(
text=" ADD TO LEADERBOARDS ",
pos=(self.screen_rect.centerx, 8 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 15),
block_type="menu",
)
text_score = "SCORE: " + str(int(np.mean(score)))
list_menu = ["PLAY", "MENU", "ADD_TO_LEADERBOARDS", "QUIT"]
menu_options = [
TextBlock(
text=" PLAY AGAIN ",
pos=(self.screen_rect.centerx, 4 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 15),
block_type="menu",
),
TextBlock(
text=" GO TO MENU ",
pos=(self.screen_rect.centerx, 6 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 15),
block_type="menu",
),
score_option,
TextBlock(
text=" QUIT ",
pos=(self.screen_rect.centerx, 10 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 15),
block_type="menu",
),
TextBlock(
text=text_score,
pos=(self.screen_rect.centerx, 15 * self.screen_rect.centery / 10),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 10),
block_type="text",
),
]
pygame.display.set_caption(
"snake-on-pygame | " + text_score + " | GAME OVER..."
)
LOGGER.info("EVENT: GAME OVER | FINAL %s", text_score)
selected_option = self.cycle_menu(menu_options, list_menu, OPTIONS)
return selected_option
def select_speed(self):
"""Speed menu, right before calling start_match.
Return
----------
speed: int
The selected speed in the main loop.
"""
list_menu = ["EASY", "MEDIUM", "HARD", "MEGA_HARDCORE"]
menu_options = [
TextBlock(
text=LEVELS[i],
pos=(
self.screen_rect.centerx,
4 * (i + 1) * self.screen_rect.centery / 10,
),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 10),
block_type="menu",
)
for i in range(len(list_menu))
]
speed = self.cycle_menu(menu_options, list_menu, SPEEDS)
mega_hardcore = False
if speed == SPEEDS["MEGA_HARDCORE"]:
mega_hardcore = True
return speed, mega_hardcore
def single_player(self, mega_hardcore=False):
"""Game loop for single_player (HUMANS).
Return
----------
score: int
The final score for the match (discounted of initial length).
"""
# The main loop, it pump key_presses and update the board every tick.
previous_size = self.snake.length # Initial size of the snake
current_size = previous_size # Initial size
color_list = self.gradient([(VAR.head_color), (VAR.tail_color)], previous_size)
# Main loop, where snakes moves after elapsed time is bigger than the
# move_wait time. The last_key pressed is recorded to make the game more
# smooth for human players.
elapsed = 0
last_key = self.snake.previous_action
move_wait = VAR.game_speed
while not self.game_over:
elapsed += self.fps.get_time() # Get elapsed time since last call.
if mega_hardcore: # Progressive speed increments, the hardest level.
move_wait = VAR.game_speed - (2 * (self.snake.length - 3))
key_input = self.handle_input() # Receive inputs with tick.
if key_input == "Q":
return current_size - 3, self.steps
if key_input is not None:
last_key = key_input
if elapsed >= move_wait: # Move and redraw
elapsed = 0
self.play(last_key)
current_size = self.snake.length # Update the body size
if current_size > previous_size:
color_list = self.gradient(
[(VAR.head_color), (VAR.tail_color)], current_size
)
previous_size = current_size
self.draw(color_list)
pygame.display.update()
self.fps.tick(GAME_FPS) # Limit FPS to 100
score = current_size - 3 # After the game is over, record score
return score, self.steps
def check_collision(self):
"""Check wether any collisions happened with the wall or body.
Return
----------
collided: boolean
Whether the snake collided or not.
"""
collided = False
if self.snake.head[0] > (VAR.board_size - 1) or self.snake.head[0] < 0:
LOGGER.info("EVENT: WALL COLLISION")
collided = True
elif self.snake.head[1] > (VAR.board_size - 1) or self.snake.head[1] < 0:
LOGGER.info("EVENT: WALL COLLISION")
collided = True
elif self.snake.head in self.snake.body[1:]:
LOGGER.info("EVENT: BODY COLLISION")
collided = True
return collided
def is_won(self):
"""Verify if the score is greater than 0.
Return
----------
won: boolean
Whether the score is greater than 0.
"""
return self.snake.length > 3
def generate_food(self):
"""Generate new food if needed.
Return
----------
food_pos: tuple of 2 * int
Current position of the food.
"""
food_pos = self.food_generator.generate_food(self.snake.body)
return food_pos
def handle_input(self):
"""After getting current pressed keys, handle important cases.
Return
----------
action: int
Handle human input to assess the next action.
"""
pygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN])
keys = pygame.key.get_pressed()
pygame.event.pump()
action = None
if keys[pygame.K_ESCAPE] or keys[pygame.K_q]:
LOGGER.info("ACTION: KEY PRESSED: ESCAPE or Q")
action = "Q"
elif keys[pygame.K_LEFT]:
LOGGER.info("ACTION: KEY PRESSED: LEFT")
action = ABSOLUTE_ACTIONS["LEFT"]
elif keys[pygame.K_RIGHT]:
LOGGER.info("ACTION: KEY PRESSED: RIGHT")
action = ABSOLUTE_ACTIONS["RIGHT"]
elif keys[pygame.K_UP]:
LOGGER.info("ACTION: KEY PRESSED: UP")
action = ABSOLUTE_ACTIONS["UP"]
elif keys[pygame.K_DOWN]:
LOGGER.info("ACTION: KEY PRESSED: DOWN")
action = ABSOLUTE_ACTIONS["DOWN"]
return action
def state(self):
"""Create a matrix of the current state of the game.
Return
----------
canvas: np.array of size board_size**2
Return the current state of the game in a matrix.
"""
canvas = np.zeros((VAR.board_size, VAR.board_size))
if self.game_over:
pass
else:
body = self.snake.body
for part in body:
canvas[part[0], part[1]] = POINT_TYPE["BODY"]
canvas[body[0][0], body[0][1]] = POINT_TYPE["HEAD"]
if self.local_state:
canvas = self.eval_local_safety(canvas, body)
canvas[self.food_pos[0], self.food_pos[1]] = POINT_TYPE["FOOD"]
return canvas
def relative_to_absolute(self, action):
"""Translate relative actions to absolute.
Return
----------
action: int
Translated action from relative to absolute.
"""
if action == RELATIVE_ACTIONS["FORWARD"]:
action = self.snake.previous_action
elif action == RELATIVE_ACTIONS["LEFT"]:
if self.snake.previous_action == ABSOLUTE_ACTIONS["LEFT"]:
action = ABSOLUTE_ACTIONS["DOWN"]
elif self.snake.previous_action == ABSOLUTE_ACTIONS["RIGHT"]:
action = ABSOLUTE_ACTIONS["UP"]
elif self.snake.previous_action == ABSOLUTE_ACTIONS["UP"]:
action = ABSOLUTE_ACTIONS["LEFT"]
else:
action = ABSOLUTE_ACTIONS["RIGHT"]
else:
if self.snake.previous_action == ABSOLUTE_ACTIONS["LEFT"]:
action = ABSOLUTE_ACTIONS["UP"]
elif self.snake.previous_action == ABSOLUTE_ACTIONS["RIGHT"]:
action = ABSOLUTE_ACTIONS["DOWN"]
elif self.snake.previous_action == ABSOLUTE_ACTIONS["UP"]:
action = ABSOLUTE_ACTIONS["RIGHT"]
else:
action = ABSOLUTE_ACTIONS["LEFT"]
return action
def play(self, action):
"""Move the snake to the direction, eat and check collision."""
self.scored = False
self.steps += 1
self.food_pos = self.generate_food()
if self.relative_pos:
action = self.relative_to_absolute(action)
if self.snake.move(action, self.food_pos):
self.scored = True
self.food_generator.is_food_on_screen = False
if self.player == "HUMAN":
if self.check_collision():
self.game_over = True
elif self.check_collision() or self.steps > 50 * self.snake.length:
self.game_over = True
def get_reward(self):
"""Return the current reward. Can be used as the reward function.
Return
----------
reward: float
Current reward of the game.
"""
reward = REWARDS["MOVE"]
if self.game_over:
reward = REWARDS["GAME_OVER"]
elif self.scored:
reward = self.snake.length
return reward
def draw(self, color_list):
"""Draw the game, the snake and the food using pygame."""
self.window.fill(pygame.Color(225, 225, 225))
for part, color in zip(self.snake.body, color_list):
pygame.draw.rect(
self.window,
color,
pygame.Rect(
(part[0] * VAR.block_size),
part[1] * VAR.block_size,
VAR.block_size,
VAR.block_size,
),
)
pygame.draw.rect(
self.window,
VAR.food_color,
pygame.Rect(
self.food_pos[0] * VAR.block_size,
self.food_pos[1] * VAR.block_size,
VAR.block_size,
VAR.block_size,
),
)
pygame.display.set_caption(
"snake-on-pygame | Score: " + str(self.snake.length - 3)
)
def step(self, action):
"""Play the action and return the new state, the reward and whether the game is over."""
self.play(action)
return self.state(), self.get_reward(), self.game_over, None
def render(self):
if not hasattr(self, "window"):
self.create_window()
size = self.snake.length # Size of the snake
color_list = self.gradient([VAR.head_color, VAR.tail_color], size)
self.draw(color_list)
pygame.display.update()
self.fps.tick(60)  # Limit the frame rate to 60 FPS
def get_name(self):
"""Prompt the player for a name using an on-screen pygame text-input box."""
done = False
input_box = InputBox(
x=200,
y=300,
w=140,
h=32,
window=self.window,
font_path=self.resource_path("resources/fonts/product_sans_bold.ttf"),
)
text_block = TextBlock(
text=" <NAME> ",
pos=(self.screen_rect.centerx, 0.9 * self.screen_rect.centery),
canvas_size=VAR.canvas_size,
font_path=self.font_path,
window=self.window,
scale=(1 / 24),
block_type="text",
)
while not done:
pygame.event.pump()
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
done = True
text = input_box.handle_event(event)
if text is not None:
done = True
input_box.update()
self.window.fill(pygame.Color(225, 225, 225))
input_box.draw()
text_block.draw()
pygame.display.update()
return text
def add_to_leaderboards(self, score, step):
file_path = self.resource_path("resources/scores.json")
name = self.get_name()
new_score = {"name": str(name), "ranking_data": {"score": score, "step": step}}
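# Create the leaderboards file on first use; otherwise load it, append the new entry and keep it sorted by score (descending).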
if not path.isfile(file_path):
data = []
data.append(new_score)
with open(file_path, mode="w") as leaderboards_file:
json.dump(data, leaderboards_file, indent=4)
else:
with open(file_path) as leaderboards_file:
data = json.load(leaderboards_file)
data.append(new_score)
data.sort(key=lambda e: e["ranking_data"]["score"], reverse=True)
with open(file_path, mode="w") as leaderboards_file:
json.dump(data, leaderboards_file, indent=4)
def view_leaderboards(self, page=1):
file_path = self.resource_path("resources/scores.json")
with open(file_path, "r") as leaderboards_file:
scores_data = json.loads(leaderboards_file.read())
dataframe = pd.DataFrame.from_dict(scores_data)
import numpy as np
import pandas
import random
import re
import sys
from scipy.stats import pearsonr, spearmanr
# auxiliary functions
def buildSeriesByCategory(df, categories):
res = []
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
res.append(occ)
res_series = pandas.Series(res, index=categories)
return res_series
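# Example (hypothetical dataframe with a "category" column):
#   counts = buildSeriesByCategory(answers_df, ["Versioning", "Manual-Job", "Job-Retry"])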
# convert the dataframe into the column format used to generate the likert plot in R
def convertToRLikertFormat(dataframe):
# get rows from dataframe
version_series = dataframe.loc["Versioning"]
manual_series = dataframe.loc["Manual-Job"]
retry_series = dataframe.loc["Job-Retry"]
allow_series = dataframe.loc["Job-Allow-Failure"]
overall_series = dataframe.loc["overall"]
# convert to R format and fill the dictionary
dict_of_columns = {}
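# fillList is assumed to be defined elsewhere in this module; it converts each counts Series
# into the per-answer column format expected by the R likert plotting code.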
dict_of_columns['Fuzzy Version'] = fillList(version_series)
dict_of_columns['Manual Execution'] = fillList(manual_series)
dict_of_columns['Retry Failure'] = fillList(retry_series)
dict_of_columns['Fake Success'] = fillList(allow_series)
dict_of_columns['Overall'] = fillList(overall_series)
# merge everything in one dataframe
result = pandas.DataFrame(dict_of_columns)
from cplvm import CPLVM
from cplvm import CPLVMLogNormalApprox
from pcpca import CPCA, PCPCA
from clvm_gaussian import fit_model as fit_clvm_gaussian
import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
import matplotlib
import time
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
tf.enable_v2_behavior()
warnings.filterwarnings("ignore")
if __name__ == "__main__":
n_samples_list = [10, 50, 100, 500, 1000]
# n_samples_list = [10, 50]
num_datapoints_x, num_datapoints_y = 200, 200
n_genes = 200
NUM_REPEATS = 10
latent_dim_shared, latent_dim_foreground = 3, 3
times_cplvm = np.empty((NUM_REPEATS, len(n_samples_list)))
times_clvm_gaussian = np.empty((NUM_REPEATS, len(n_samples_list)))
times_pcpca = np.empty((NUM_REPEATS, len(n_samples_list)))
times_cpca = np.empty((NUM_REPEATS, len(n_samples_list)))
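# Each timing matrix stores one row per repeat and one column per sample size.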
for ii, n_samples in enumerate(n_samples_list):
for jj in range(NUM_REPEATS):
# ------- generate data ---------
cplvm_for_data = CPLVM(
k_shared=latent_dim_shared, k_foreground=latent_dim_foreground
)
concrete_cplvm_model = functools.partial(
cplvm_for_data.model,
data_dim=n_genes,
num_datapoints_x=n_samples,
num_datapoints_y=n_samples,
counts_per_cell_X=1,
counts_per_cell_Y=1,
is_H0=False,
)
model = tfd.JointDistributionCoroutineAutoBatched(concrete_cplvm_model)
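# Sample one synthetic dataset pair (X, Y) from the CPLVM generative model for this repeat.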
deltax, sf_x, sf_y, s, zx, zy, w, ty, X_sampled, Y_sampled = model.sample()
X, Y = X_sampled.numpy(), Y_sampled.numpy()
##### CPLVM #####
t0 = time.time()
cplvm = CPLVM(
k_shared=latent_dim_shared, k_foreground=latent_dim_foreground
)
approx_model = CPLVMLogNormalApprox(
X, Y, latent_dim_shared, latent_dim_foreground
)
model_fit = cplvm._fit_model_vi(
X, Y, approx_model, compute_size_factors=True, is_H0=False
)
t1 = time.time()
curr_time = t1 - t0
times_cplvm[jj, ii] = curr_time
##### CLVM (gaussian model) #####
t0 = time.time()
fit_clvm_gaussian(
X,
Y,
latent_dim_shared,
latent_dim_foreground,
compute_size_factors=False,
is_H0=False,
)
t1 = time.time()
curr_time = t1 - t0
times_clvm_gaussian[jj, ii] = curr_time
##### PCPCA #####
t0 = time.time()
pcpca = PCPCA(gamma=0.7, n_components=latent_dim_foreground)
pcpca.fit(X, Y)
pcpca.transform(X, Y)
t1 = time.time()
curr_time = t1 - t0
times_pcpca[jj, ii] = curr_time
##### CPCA #####
t0 = time.time()
cpca = CPCA(gamma=0.7, n_components=latent_dim_foreground)
cpca.fit(X, Y)
cpca.transform(X, Y)
t1 = time.time()
curr_time = t1 - t0
times_cpca[jj, ii] = curr_time
times_cplvm_df = pd.DataFrame(times_cplvm, columns=n_samples_list)
times_cplvm_df_melted = pd.melt(times_cplvm_df)
times_cplvm_df_melted["model"] = [
"cplvm" for _ in range(NUM_REPEATS * len(n_samples_list))
]
times_clvm_df = pd.DataFrame(times_clvm_gaussian, columns=n_samples_list)
times_clvm_df_melted = pd.melt(times_clvm_df)
times_clvm_df_melted["model"] = [
"clvm" for _ in range(NUM_REPEATS * len(n_samples_list))
]
times_pcpca_df = pd.DataFrame(times_pcpca, columns=n_samples_list)
times_pcpca_df_melted = pd.melt(times_pcpca_df)
times_pcpca_df_melted["model"] = [
"pcpca" for _ in range(NUM_REPEATS * len(n_samples_list))
]
times_cpca_df = pd.DataFrame(times_cpca, columns=n_samples_list)
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" categories are included in "new" -> values whose category
# was dropped become np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical.searchsorted returns an np.array (like pd.Series), which differs from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
import pandas as pd
import logging
logging.basicConfig(format='%(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def read_data_to_df(data_path: str, **read_data_options):
"""
read data depending on its extension and convert it to a pandas dataframe
"""
file_ext = data_path.split('.')[-1]
if file_ext == 'csv' or file_ext == 'txt':
return | pd.read_csv(data_path, **read_data_options) | pandas.read_csv |
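# Illustrative usage (editor's sketch; 'example.csv' is a hypothetical path and the
# extra keyword arguments are simply forwarded to the pandas reader):
# >>> df = read_data_to_df('example.csv', sep=',', encoding='utf-8')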
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
tf.random.set_seed(2021)
from models import DNMC, NMC, NSurv, MLP, train_model, evaluate_model
FILL_VALUES = {
'alb': 3.5,
'pafi': 333.3,
'bili': 1.01,
'crea': 1.01,
'bun': 6.51,
'wblc': 9.,
'urine': 2502.
}
TO_DROP = ['aps', 'sps', 'surv2m', 'surv6m', 'prg2m', 'prg6m', 'dnr', 'dnrday']
TO_DROP = TO_DROP + ['sfdm2', 'hospdead']
# load, drop columns, fill using specified fill values
df = pd.read_csv('../datasets/support2.csv').drop(TO_DROP,axis=1).fillna(value=FILL_VALUES)
# get dummies for categorical vars
df = | pd.get_dummies(df, dummy_na=True) | pandas.get_dummies |
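# Minimal illustration (editor's note) of what dummy_na=True adds: categorical
# columns gain an extra indicator column for missing values.
# >>> pd.get_dummies(pd.DataFrame({'sex': ['male', 'female', np.nan]}), dummy_na=True).columns.tolist()
# ['sex_female', 'sex_male', 'sex_nan']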
###########################################################################################################################
# SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM #
###########################################################################################################################
import os
from datetime import datetime
import numpy as np
import pandas as pd
from transform.extract.download_SIM import download_DOXXaaaa, download_table_dbf, download_table_cnv
"""
Módulo de limpeza/tratamento de dados do SIM.
"""
# Função para converter um "value" num certo "type" de objeto ou caso não seja possível utiliza o valor "default"
def tryconvert(value, default, type):
try:
return type(value)
except (ValueError, TypeError):
return default
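# Illustrative examples (editor's note) of tryconvert:
# >>> tryconvert('12', 0, int)
# 12
# >>> tryconvert('', 0, int)   # int('') raises ValueError, so the default is returned
# 0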
# Classe de dados principais do SIM
class DataSimMain:
# Construtor
def __init__(self, state, year):
self.state = state
self.year = year
# Método para ler como um objeto pandas DataFrame o arquivo principal de dados do SIM e adequar e formatar suas...
# colunas e valores
def get_DOXXaaaa_treated(self):
# Lê o arquivo "dbc" ou "parquet", se já tiver sido baixado, como um objeto pandas DataFrame
dataframe = download_DOXXaaaa(self.state, self.year)
print(f'O número de linhas do arquivo DO{self.state}{self.year} é {dataframe.shape[0]}.')
for coluna in dataframe.columns.values:
dataframe[coluna] = dataframe[coluna].apply(lambda x: x if '\x00' not in x else '')
# Colunas definidas como necessárias no objeto pandas DataFrame que incrementará a tabela dobr
lista_columns = np.array(['NUMERODO', 'CODINST', 'TIPOBITO', 'DTOBITO', 'HORAOBITO', 'NUMSUS',
'NATURAL', 'CODMUNNATU', 'DTNASC', 'IDADE', 'SEXO', 'RACACOR', 'ESTCIV',
'ESC', 'ESC2010', 'OCUP', 'CODMUNRES', 'LOCOCOR', 'CODESTAB', 'CODMUNOCOR',
'TPMORTEOCO', 'ASSISTMED', 'EXAME', 'CIRURGIA', 'NECROPSIA', 'LINHAA',
'LINHAB', 'LINHAC', 'LINHAD', 'LINHAII', 'CAUSABAS', 'CRM', 'DTATESTADO',
'CIRCOBITO', 'ACIDTRAB', 'FONTE', 'TPPOS', 'DTINVESTIG', 'CAUSABAS_O',
'DTCADASTRO', 'ATESTANTE', 'FONTEINV', 'DTRECEBIM', 'ATESTADO', 'ESCMAEAGR1',
'ESCFALAGR1', 'STDOEPIDEM', 'STDONOVA', 'DIFDATA', 'DTCADINV', 'TPOBITOCOR',
'DTCONINV', 'FONTES'])
# Criação de um objeto pandas DataFrame vazio com as colunas especificadas acima
df = pd.DataFrame(columns=lista_columns)
# Colocação dos dados da variável "dataframe" na variável "df" nas colunas de mesmo nome preenchendo...
# automaticamente com o float NaN as colunas da variável "df" não presentes na variável dataframe
for col in df.columns.values:
for coluna in dataframe.columns.values:
if coluna == col:
df[col] = dataframe[coluna].tolist()
break
# Coloca na variável "dif_set" o objeto array dos nomes das colunas da variável "df" que não estão...
# presentes na variável "dataframe"
dif_set = np.setdiff1d(df.columns.values, dataframe.columns.values)
# Substitui o float NaN pela string vazia as colunas da variável "df" não presentes na variável "dataframe"
for col in dif_set:
df[col].replace(np.nan, '', inplace=True)
# Exclui o último dígito numérico das colunas identificadas, o qual corresponde ao dígito de controle do...
# código do município
# Foi detectado que para alguns municípios o cálculo do dígito de controle não é válido
# Esse dígito de controle esteve presente nos arquivos DOXXxxxx até o ano de 2005 (a confirmar!)
if len(df.loc[0, 'CODMUNNATU']) == 7:
df['CODMUNNATU'].replace(regex='.$',value='', inplace=True)
if len(df.loc[0, 'CODMUNRES']) == 7:
df['CODMUNRES'].replace(regex='.$',value='', inplace=True)
if len(df.loc[0, 'CODMUNOCOR']) == 7:
df['CODMUNOCOR'].replace(regex='.$',value='', inplace=True)
# Simplifica/corrige a apresentação dos dados das colunas especificadas
df['HORAOBITO'] = df['HORAOBITO'].apply(lambda x: x[:4] if len(x) > 4 else x)
df['NATURAL'] = df['NATURAL'].apply(lambda x: x.zfill(3))
df['OCUP'] = df['OCUP'].apply(lambda x: x.zfill(6))
df['OCUP'] = df['OCUP'].apply(str.strip)
df['OCUP'] = df['OCUP'].apply(lambda x: x if len(x) == 6 else '')
df['CODESTAB'] = df['CODESTAB'].apply(lambda x: x.zfill(7))
for col in np.array(['ESCMAEAGR1', 'ESCFALAGR1']):
for i in np.array(['00', '01', '02', '03', '04', '05', '06', '07', '08', '09']):
df[col].replace(i, str(int(i)), inplace=True)
# Atualiza/corrige os labels das colunas especificadas
df['NATURAL'].replace(['000', '999'], '', inplace=True)
df['NATURAL'].replace('800', '001', inplace=True)
df['NATURAL'].replace(['00.', '8s9'], '', inplace=True)
for col in np.array(['DTOBITO', 'DTNASC']):
df[col] = df[col].apply(lambda x: x if len(x) == 8 else '')
df[col] = df[col].apply(lambda x: x if ' ' not in x else '')
df[col] = df[col].apply(lambda x: x if '/' not in x else '')
df[col] = df[col].apply(lambda x: x if '¾' not in x else '')
df[col] = df[col].apply(lambda x: x if 'ó' not in x else '')
df[col] = df[col].apply(lambda x: x if 1 <= tryconvert(x[0:2], 0, int) <= 31 else '')
df[col] = df[col].apply(lambda x: x if 1 <= tryconvert(x[2:4], 0, int) <= 12 else '')
for col in np.array(['CODMUNNATU', 'CODMUNRES', 'CODMUNOCOR']):
df[col].replace(['000000', '150475', '421265', '422000', '431454',
'500627', '990002', '990010', '990014', '999999'], '', inplace=True)
df[col].replace([str(i) for i in range(334501, 334531)], '330455', inplace=True)
df[col].replace([str(i) for i in range(358001, 358059)], '355030', inplace=True)
df[col].replace(['530000', '530500', '530600', '530800', '530900', '531700', '539901',
'539902', '539904', '539905', '539906', '539907', '539914', '539916',
'539918', '539919', '539920', '539921', '539924', '539925'], '530010', inplace=True)
df['SEXO'].replace('1', 'M', inplace=True) # Label "M" de Masculino
df['SEXO'].replace('2', 'F', inplace=True) # Label "F" de Feminino
df['SEXO'].replace('0', '3', inplace=True)
df['SEXO'].replace('3', 'IN', inplace=True) # Label "IN" de INdefinido
df['ESTCIV'].replace(['²', '±'], '', inplace=True)
df['ESC'].replace('A', '', inplace=True)
df['OCUP'] = df['OCUP'].apply(lambda x: x if ' ' not in x else '')
df['OCUP'] = df['OCUP'].apply(lambda x: x if '.' not in x else '')
df['OCUP'] = df['OCUP'].apply(lambda x: x if '+' not in x else '')
df['OCUP'] = df['OCUP'].apply(lambda x: x if 'X' not in x else '')
df['OCUP'].replace('000000', '', inplace=True)
df['CODESTAB'].replace('0000000', '', inplace=True)
df['CODESTAB'].replace('2306840', '2461234', inplace=True)
df['CODESTAB'].replace('2464276', '2726688', inplace=True)
df['CODESTAB'].replace('2517825', '3563308', inplace=True)
df['CODESTAB'].replace('2772299', '2465140', inplace=True)
df['CODESTAB'].replace('3064115', '3401928', inplace=True)
df['TPMORTEOCO'].replace('8', '6', inplace=True)
df['TPMORTEOCO'].replace('9', '7', inplace=True)
for col in np.array(['ASSISTMED', 'EXAME', 'CIRURGIA', 'NECROPSIA', 'ACIDTRAB']):
df[col].replace(['0', '3', '4', '5', '6', '7', '8', '9'], '', inplace=True)
df[col].replace('2', '0', inplace=True) # "2", representativo de "Não", é convertido para o objeto...
# string "0" do domínio binário
for col in np.array(['CAUSABAS', 'CAUSABAS_O']):
df[col].replace('B501', 'B508', inplace=True)
df[col].replace('B656', 'B653', inplace=True)
df[col].replace('C141', 'C140', inplace=True)
df[col].replace('M723', 'M724', inplace=True)
df[col].replace('M725', 'M728', inplace=True)
df[col].replace('N975', 'N978', inplace=True)
df[col].replace('Q314', 'P288', inplace=True)
df[col].replace('Q350', 'Q351', inplace=True)
df[col].replace('Q352', 'Q353', inplace=True)
df[col].replace('Q354', 'Q355', inplace=True)
df[col].replace(['Q356', 'Q358'], 'Q359', inplace=True)
df[col].replace('R500', 'R508', inplace=True)
df[col].replace('R501', 'R500', inplace=True)
df[col].replace(['X590', 'X591', 'X592', 'X593', 'X594',
'X595', 'X596', 'X597', 'X598'], 'X599', inplace=True)
df[col].replace('Y34', 'Y349', inplace=True)
df[col].replace('Y447', 'Y448', inplace=True)
df['CAUSABAS_O'].replace(regex='.$',value='', inplace=True)
df['TPPOS'].replace('2', '0', inplace=True) # "2", representativo de "Não", é convertido para o objeto...
# string "0" do domínio binário
df['DTATESTADO'].replace('09201608', '', inplace=True)
df['DTATESTADO'] = df['DTATESTADO'].apply(lambda x: x if len(x) == 8 else '')
df['DTATESTADO'] = df['DTATESTADO'].apply(lambda x: x if x[2:4] != '20' else '')
df['CIRCOBITO'].replace(['á', 'ß', 'C'], '', inplace=True)
for col in np.array(['ESCMAEAGR1', 'ESCFALAGR1']):
df[col].replace('9', '', inplace=True)
df['TPOBITOCOR'].replace('0', '', inplace=True)
# Atribui um único label para uma mesma significação nas colunas especificadas
df['TIPOBITO'].replace(['0', '3', '4', '5', '6', '7', '8', '9'], '', inplace=True)
for col in np.array(['RACACOR', 'ESTCIV', 'ESC', 'LOCOCOR', 'ATESTANTE']):
df[col].replace(['0', '6', '7', '8', '9'], '', inplace=True)
df['ESC2010'].replace(['6', '7', '8', '9'], '', inplace=True)
df['TPMORTEOCO'].replace(['0', '7', '8', '9'], '', inplace=True)
for col in np.array(['CIRCOBITO', 'FONTE']):
df[col].replace(['0', '5', '6', '7', '8', '9'], '', inplace=True)
df['FONTEINV'].replace(['0', '9'], '', inplace=True)
# Substitui uma string vazia pela string "NA" nas colunas de foreign keys
# A coluna FONTES é apenas considerada como tal pois recebe tratamento específico mais adiante
for col in np.array(['TIPOBITO', 'NATURAL', 'CODMUNNATU', 'RACACOR',
'ESTCIV', 'ESC', 'ESC2010', 'OCUP', 'CODMUNRES',
'LOCOCOR', 'CODESTAB', 'CODMUNOCOR', 'TPMORTEOCO',
'CAUSABAS', 'CIRCOBITO', 'FONTE', 'CAUSABAS_O',
'ATESTANTE', 'FONTEINV', 'ESCMAEAGR1', 'ESCFALAGR1',
'TPOBITOCOR', 'FONTES']):
df[col].replace('', 'NA', inplace=True)
# Substitui uma string vazia por None nas colunas de atributos especificadas
for col in np.array(['CODINST', 'HORAOBITO', 'NUMSUS', 'SEXO', 'LINHAA',
'LINHAB', 'LINHAC', 'LINHAD', 'LINHAII', 'CRM', 'ATESTADO']):
df[col].replace('', None, inplace=True)
# Divisão da coluna "FONTES" em seis colunas conforme Dicionário de Dados da Tabela DOM ("M" de...
# investigação materna)
df['FONTES'] = df['FONTES'].apply(lambda x: x if len(x) == 6 else 'NA')
for posicao, col in enumerate(np.array(['FONTENTREV', 'FONTEAMBUL', 'FONTEPRONT', 'FONTESVO', 'FONTEIML', 'FONTEPROF'])):
df[col] = df['FONTES'].apply(lambda x: 'NA' if x == 'NA' else x[posicao]) # Cada coluna recebe o caractere da...
# posição correspondente na string "FONTES"; o valor é "S" de "Sim" ou "X" de "Não"
df[col].replace('X', '0', inplace=True) # Substitui a string "X" por "0" de "Não" tornando a...
# coluna "col" com domínio "binário"
df[col].replace('S', '1', inplace=True) # Substitui a string "S" por "1" de "Sim" tornando a...
# coluna "col" com domínio "binário"
# Eliminação da coluna "FONTES" por se tornar desnecessária com a adição das seis colunas especificadas acima
df.drop('FONTES', axis=1, inplace=True)
# Converte do tipo string para datetime as colunas especificadas substituindo as datas faltantes ("NaT") pela...
# data futura "2099-01-01" para permitir a inserção das referidas colunas no SGBD postgreSQL
for col in np.array(['DTOBITO', 'DTNASC', 'DTATESTADO', 'DTINVESTIG',
'DTCADASTRO', 'DTRECEBIM', 'DTCADINV', 'DTCONINV']):
df[col] = df[col].apply(lambda x: datetime.strptime(x, '%d%m%Y').date() \
if x != '' else datetime(2099, 1, 1).date())
# Verifica se as datas das colunas especificadas são absurdas e em caso afirmativo as substitui pela...
# data futura "2099-01-01"
for col in np.array(['DTOBITO', 'DTATESTADO', 'DTINVESTIG',
'DTCADASTRO', 'DTRECEBIM', 'DTCADINV', 'DTCONINV']):
df[col] = df[col].apply(lambda x: x if datetime(2000, 12, 31).date() < x < \
datetime(2020, 12, 31).date() else datetime(2099, 1, 1).date())
df['DTNASC'] = df['DTNASC'].apply(lambda x: x if datetime(1850, 12, 31).date() < x < \
datetime(2020, 12, 31).date() else datetime(2099, 1, 1).date())
# Computa a diferença entre as datas de óbito e de nascimento em dias e a aloca como a coluna "IDADE"...
# do objeto pandas DataFrame
df['IDADE'] = df['DTOBITO'] - df['DTNASC']
# Converte os valores da coluna IDADE de datetime.timedelta para string
# Ainda na mesma linha, cria uma lista de dois objetos string de cada valor da coluna IDADE e aproveita...
# apenas o primeiro objeto de cada lista
df['IDADE'] = df['IDADE'].apply(lambda x: str(x).split(' day')[0])
# Os valores em que a operação anterior forneceu a string "0:00:00" são substituídos pela string...
# "0" (RN que viveram menos de um dia)
df['IDADE'].replace('0:00:00', '0', inplace=True)
# Converte os valores da coluna IDADE de string para float (em dias) atribuindo o float NaN para as...
# string que começam com "-"
df['IDADE'] = df['IDADE'].apply(lambda x: np.nan if x[0] == '-' else float(x))
# Transforma o valor da coluna referida de dias para anos mantendo cinco casas decimais
df['IDADE']=df['IDADE'].div(365).round(5)
# Converte do tipo object para int ou para None as colunas de atributos de valores binários (0 ou 1)
for col in np.array(['ASSISTMED', 'EXAME', 'CIRURGIA', 'NECROPSIA', 'ACIDTRAB',
'TPPOS', 'STDOEPIDEM', 'STDONOVA', 'FONTENTREV', 'FONTEAMBUL',
'FONTEPRONT', 'FONTESVO', 'FONTEIML', 'FONTEPROF']):
df[col] = df[col].apply(lambda x: tryconvert(x, None, int))
# Converte do tipo object para float sem casas decimais as colunas de atributos de valores...
# representativos de quantidades ou para o valor None caso a coluna esteja com a string vazia
df['DIFDATA'] = df['DIFDATA'].apply(lambda x: round(float(x),0) if x != '' else None)
# Reordena o objeto pandas DataFrame por ordem crescente de valores da coluna DTOBITO
df.sort_values(by=['DTOBITO'], inplace=True)
# Renumera os índices devido à alteração efetivada no passo anterior
df.reset_index(drop=True, inplace=True)
# Renomeia colunas
df.rename(index=str, columns={'TIPOBITO': 'TIPOBITO_ID', 'NATURAL': 'NATURALE_ID',
'CODMUNNATU': 'CODMUNNATU_ID', 'RACACOR': 'RACACOR_ID',
'ESTCIV': 'ESTCIV_ID', 'ESC': 'ESC_ID',
'ESC2010': 'ESC2010_ID', 'OCUP': 'OCUP_ID',
'CODMUNRES': 'CODMUNRES_ID', 'LOCOCOR': 'LOCOCOR_ID',
'CODESTAB': 'CODESTAB_ID', 'CODMUNOCOR': 'CODMUNOCOR_ID',
'TPMORTEOCO': 'TPMORTEOCO_ID', 'CAUSABAS': 'CAUSABAS_ID',
'CIRCOBITO': 'CIRCOBITO_ID', 'FONTE': 'FONTE_ID',
'CAUSABAS_O': 'CAUSABAS_O_ID', 'ATESTANTE': 'ATESTANTE_ID',
'FONTEINV': 'FONTEINV_ID', 'ESCMAEAGR1': 'ESCMAEAGR1_ID',
'ESCFALAGR1': 'ESCFALAGR1_ID', 'TPOBITOCOR': 'TPOBITOCOR_ID'}, inplace=True)
print(f'Tratou o arquivo DO{self.state}{self.year} (shape final: {df.shape[0]} x {df.shape[1]}).')
return df
# Classe de dados auxiliares do SIM
class DataSimAuxiliary:
# Construtor
def __init__(self, path):
self.path = path
# Função para adequar e formatar as colunas e valores da TCC TIPOBITO (arquivo TIPOBITO.cnv)
def get_TIPOBITO_treated(self):
# Conversão da TCC TIPOBITO para um objeto pandas DataFrame
file_name = 'TIPOBITO'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'TIPO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Função para adequar e formatar as colunas e valores da TCC NAT1212 (arquivo NAT1212.cnv)
def get_NAT1212_treated(self):
# Conversão da TCC NAT1212 para um objeto pandas DataFrame
file_name = 'NAT1212'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'LOCAL'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df" até formar uma...
# "string" de tamanho = 3
df['ID'] = df['ID'].apply(lambda x: x.zfill(3))
# Upload do arquivo "xlsx" que contém os NATURAL presentes nos arquivos DOXXxxxx (a partir do ano...
# de 2001) e não presentes na TCC NAT1212. Ou seja, isso parece ser uma falha dos dados do Datasus
dataframe = pd.read_excel(self.path + 'NATURAL_OUT_NAT1212_ANOS_1997_2017' + '.xlsx')
# Converte a coluna "ID" do objeto "dataframe" de "int" para "string"
dataframe['ID'] = dataframe['ID'].astype('str')
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "dataframe" até...
# formar uma "string" de tamanho = 3
dataframe['ID'] = dataframe['ID'].apply(lambda x: x.zfill(3))
# Adiciona a coluna "LOCAL" e respectivos valores ao objeto "dataframe"
dataframe['LOCAL'] = ['NAO PROVIDO NA TCC NAT1212'] * (dataframe.shape[0])
# Concatenação do objeto "dataframe" ao objeto "df"
frames = []
frames.append(df)
frames.append(dataframe)
dfinal = pd.concat(frames, ignore_index=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
dfinal.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena eventualmente as linhas por ordem crescente dos valores da coluna ID
dfinal.sort_values(by=['ID'], inplace=True)
# Reset eventualmente o index devido ao sorting prévio e à eventual eliminação de duplicates
dfinal.reset_index(drop=True, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar célula de...
# string vazia da coluna "NATURAL_ID" da tabela DOBR
dfinal.loc[dfinal.shape[0]] = ['NA', 'NOT AVAILABLE']
return dfinal
# Função para adequar e formatar as colunas e valores da Tabela TABUF (arquivo TABUF.dbf)
def get_TABUF_treated(self):
# Conversão da Tabela TABUF para um objeto pandas DataFrame
file_name = 'TABUF'
df = download_table_dbf(file_name)
# Renomeia colunas especificadas
df.rename(index=str, columns={'CODIGO': 'ID', 'DESCRICAO': 'ESTADO'}, inplace=True)
# Reordena as colunas
df = df[['ID', 'ESTADO', 'SIGLA_UF']]
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE', '?']
return df
# Função para adequar e formatar as colunas e valores da Tabela RSAUDE (do IBGE)
def get_RSAUDE_treated(self):
# Conversão da Tabela RSAUDE (em formato "xlsx") para um objeto pandas DataFrame
df = pd.read_excel(self.path + 'RSAUDE' + '.xlsx')
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'REGIAO'}, inplace=True)
# Converte para string a coluna especificada
df['ID'] = df['ID'].astype('str')
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Método para adequar e formatar as colunas e valores da Tabela CADMUN (arquivo CADMUN.dbf)
def get_CADMUN_treated(self):
# Conversão da Tabela CADMUN para um objeto pandas DataFrame
file_name = 'CADMUN'
df1 = download_table_dbf(file_name)
# Renomeia as colunas especificadas
df1.rename(index=str, columns={'MUNCOD': 'ID', 'UFCOD': 'UFCOD_ID'}, inplace=True)
# Drop a linha inteira em que a coluna "ID" tem o valor especificado por não representar nenhum município
df1 = df1.drop(df1[df1['ID']=='000000'].index)
# Remove colunas indesejáveis do objeto pandas DataFrame
df1 = df1.drop(['MUNSINON', 'MUNSINONDV', 'MESOCOD', 'MICROCOD', 'MSAUDCOD',
'RSAUDCOD', 'CSAUDCOD', 'RMETRCOD', 'AGLCOD'], axis=1)
# Substitui uma string vazia pela string "?" nas colunas especificadas
for col in ['SITUACAO', 'MUNSINP', 'MUNSIAFI', 'MUNNOME', 'MUNNOMEX', 'OBSERV',
'AMAZONIA', 'FRONTEIRA', 'CAPITAL', 'ANOINST', 'ANOEXT', 'SUCESSOR']:
df1[col].replace('', '?', inplace=True)
# Substitui uma string vazia pela string "NA" nas colunas especificadas
df1['UFCOD_ID'].replace('', 'NA', inplace=True)
# Substitui uma string vazia pelo float "NaN" nas colunas especificadas
for col in ['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']:
df1[col].replace('', np.nan, inplace=True)
# Converte do tipo object para float as colunas especificadas
df1[['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']] = \
df1[['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']].astype('float')
# Coloca todas as string das colunas especificadas como UPPER CASE
df1['MUNNOME'] = df1['MUNNOME'].apply(lambda x: x.upper())
df1['MUNNOMEX'] = df1['MUNNOMEX'].apply(lambda x: x.upper())
# Insere uma linha referente ao Município de Nazária/PI não constante originalmente do arquivo
df1.loc[df1.shape[0]] = ['220672', '2206720', 'ATIVO', '?', '?', 'NAZÁRIA', 'NAZARIA', '?',
'N', 'N', 'N', '22', '?', '?', '?', np.nan, np.nan, np.nan, 363.589]
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna ID
df1.sort_values(by=['ID'], inplace=True)
# Reset o index devido ao sorting prévio e à exclusão e inclusão das linhas referidas acima
df1.reset_index(drop=True, inplace=True)
# Conversão da Tabela rl_municip_regsaud para um objeto pandas DataFrame
file_name = 'rl_municip_regsaud'
df2 = download_table_dbf(file_name)
# Renomeia as colunas especificadas
df2.rename(index=str, columns={'CO_MUNICIP': 'ID', 'CO_REGSAUD': 'RSAUDE_ID'}, inplace=True)
# Faz o merge de "df1" e "df2" pela coluna ID tendo por base "df1"
df = pd.merge(df1, df2, how='left', left_on='ID', right_on='ID')
# Converte o float NaN para a string "NA"
df['RSAUDE_ID'].replace(np.nan, 'NA', inplace=True)
# Reordena as colunas priorizando as "mais" relevantes
df = df[['ID', 'MUNNOME', 'MUNNOMEX', 'MUNCODDV', 'OBSERV', 'SITUACAO', 'MUNSINP',
'MUNSIAFI', 'UFCOD_ID', 'AMAZONIA', 'FRONTEIRA', 'CAPITAL', 'RSAUDE_ID',
'LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA', 'ANOINST', 'ANOEXT', 'SUCESSOR']]
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE', '?', '?', '?', '?', '?', '?', 'NA', '?',
'?', '?', 'NA', np.nan, np.nan, np.nan, np.nan, '?', '?', '?']
return df
# Função para adequar e formatar as colunas e valores da TCC RACA (arquivo RACA.cnv)
def get_RACA_treated(self):
# Conversão da TCC RACA para um objeto pandas DataFrame
file_name = 'RACA'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'TIPO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Função para adequar e formatar as colunas e valores da TCC ESTCIV (arquivo ESTCIV.cnv)
def get_ESTCIV_treated(self):
# Conversão da TCC ESTCIV para um objeto pandas DataFrame
file_name = 'ESTCIV'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'SITUACAO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Função para adequar e formatar as colunas e valores da TCC INSTRUC (arquivo INSTRUC.cnv)
def get_INSTRUC_treated(self):
# Conversão da TCC INSTRUC para um objeto pandas DataFrame
file_name = 'INSTRUC'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'FAIXA_DE_ANOS_INSTRUCAO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Função para adequar e formatar as colunas e valores da TCC ESCSERIE (arquivo ESCSERIE.cnv)
def get_ESCSERIE_treated(self):
# Conversão da TCC ESCSERIE para um objeto pandas DataFrame
file_name = 'ESCSERIE'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'ESCOLARIDADE'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Função para adequar e formatar as colunas e valores da Tabela TABOCUP (arquivo TABOCUP.dbf)
# e das TCC CBO2002 e OCUPA (arquivos CBO2002.cnv e OCUPA.cnv, respectivamente)
def get_TABOCUP_2TCC_treated(self):
# Conversão da Tabela TABOCUP para um objeto pandas DataFrame
file_name = 'TABOCUP'
df1 = download_table_dbf(file_name)
# Renomeia as colunas especificadas
df1.rename(index=str, columns={'CODIGO': 'ID', 'DESCRICAO': 'OCUPACAO'}, inplace=True)
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna ID
df1.sort_values(by=['ID'], inplace=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
df1.drop_duplicates(subset='ID', keep='first', inplace=True)
# Reset o index devido ao sorting prévio
df1.reset_index(drop=True, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df1" até formar...
# uma "string" de tamanho = 6
df1['ID'] = df1['ID'].apply(lambda x: x.zfill(6))
# Conversão da TCC CBO2002 para um objeto pandas DataFrame
file_name = 'CBO2002'
df2 = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df2.rename(index=str, columns={'SIGNIFICACAO': 'OCUPACAO'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df2" até formar...
# uma "string" de tamanho = 6
df2['ID'] = df2['ID'].apply(lambda x: x.zfill(6))
# Conversão da TCC OCUPA para um objeto pandas DataFrame
file_name = 'OCUPA'
df3 = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df3.rename(index=str, columns={'SIGNIFICACAO': 'OCUPACAO'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df3" até formar...
# uma "string" de tamanho = 6
df3['ID'] = df3['ID'].apply(lambda x: x.zfill(6))
# Concatena os três objetos pandas DataFrame
frames = []
frames.append(df1)
frames.append(df2)
frames.append(df3)
df = pd.concat(frames, ignore_index=True)
# Elimina linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
df.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena as linhas por ordem crescente dos valores da coluna "ID"
df.sort_values(by=['ID'], inplace=True)
# Reset o index devido ao sorting prévio e à eventual eliminação de duplicates
df.reset_index(drop=True, inplace=True)
# Upload do arquivo "xlsx" que contém os OCUP presentes nos arquivos DOXXaaaa (dos anos de...
# 1997 a 2017) e não presentes na Tabela TABOCUP e nas TCC CBO2002 e OCUPA. Ou seja, isso...
# parece ser uma falha dos dados do Datasus
dataframe = pd.read_excel(self.path + 'OCUP_OUT_TABOCUP_E_2TCC_ANOS_1997_2017' + '.xlsx')
# Converte a coluna "ID" do objeto "dataframe" de "int" para "string"
dataframe['ID'] = dataframe['ID'].astype('str')
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "dataframe" até formar...
# uma "string" de tamanho = 6
dataframe['ID'] = dataframe['ID'].apply(lambda x: x.zfill(6))
# Adiciona a coluna "OCUPACAO" e respectivos valores ao objeto "dataframe"
dataframe['OCUPACAO'] = ['NAO PROVIDO EM TABOCUP.DBF E NAS TCC CBO2002/OCUPA'] * (dataframe.shape[0])
# Concatenação do objeto "dataframe" ao objeto "df"
frames = []
frames.append(df)
frames.append(dataframe)
dfinal = pd.concat(frames, ignore_index=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
dfinal.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena eventualmente as linhas por ordem crescente dos valores da coluna ID
dfinal.sort_values(by=['ID'], inplace=True)
# Reset eventualmente o index devido ao sorting prévio e à eventual eliminação de duplicates
dfinal.reset_index(drop=True, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
dfinal.loc[dfinal.shape[0]] = ['NA', 'NOT AVAILABLE']
return dfinal
# Função para adequar e formatar as colunas e valores da TCC LOCOCOR (arquivo LOCOCOR.cnv)
def get_LOCOCOR_treated(self):
# Conversão da TCC LOCOCOR para um objeto pandas DataFrame
file_name = 'LOCOCOR'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'LUGAR'}, inplace=True)
# Converte para string a coluna especificada
df['ID'] = df['ID'].astype('str')
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Função para adequar e formatar as colunas e valores da Tabela CNESDO18 do SIM (arquivo CNESDO18.dbf)...
# e da TCC ESTAB06 (arquivo ESTAB06.cnv)
# Além disso faz o "merge" a elas das TCC ESFERA e NAT_ORG (arquivos ESFERA.cnv e NAT_ORG.cnv, respectivamente)
def get_CNESDO18_3TCC_treated(self):
# Conversão da Tabela CNESDO18 para um objeto pandas DataFrame
file_name = 'CNESDO18'
df1 = download_table_dbf(file_name)
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna CODESTAB
df1.sort_values(by=['CODESTAB'], inplace=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna CODESTAB e mantém a primeira ocorrência
df1.drop_duplicates(subset='CODESTAB', keep='first', inplace=True)
# Reset o index devido ao sorting prévio
df1.reset_index(drop=True, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df1" até formar...
# uma "string" de tamanho = 7
df1['CODESTAB'] = df1['CODESTAB'].apply(lambda x: x.zfill(7))
# Conversão da TCC ESTAB06 para um objeto pandas DataFrame
file_name = 'ESTAB06'
df2 = download_table_cnv(file_name)
df2.rename(index=str, columns={'ID': 'CODESTAB', 'SIGNIFICACAO': 'DESCESTAB'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df2" até formar...
# uma "string" de tamanho = 7
df2['CODESTAB'] = df2['CODESTAB'].apply(lambda x: x.zfill(7))
# Concatena os dois objetos pandas DataFrame
frames = []
frames.append(df1)
frames.append(df2)
df = pd.concat(frames, ignore_index=True)
# Elimina linhas duplicadas
df.drop_duplicates(subset='CODESTAB', keep='first', inplace=True)
# Ordena as linhas por ordem crescente dos valores da coluna "CODESTAB"
df.sort_values(by=['CODESTAB'], inplace=True)
# Reseta os índices
df.reset_index(drop=True, inplace=True)
# Conversão da TCC ESFERA18 para um objeto pandas DataFrame
file_name = 'ESFERA18'
df3 = download_table_cnv(file_name)
# Adequa e formata a TCC ESFERA18
df3.rename(index=str, columns={'ID': 'CODESTAB', 'SIGNIFICACAO': 'ESFERA'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df3" até formar...
# uma "string" de tamanho = 7
df3['CODESTAB'] = df3['CODESTAB'].apply(lambda x: x.zfill(7))
# Conversão da TCC NAT_ORG (já em formato "xlsx" e não "cnv") para um objeto pandas DataFrame
file_name = 'NAT_ORG'
df4 = download_table_cnv(file_name)
# Adequa e formata a TCC NAT_ORG
df4.rename(index=str, columns={'ID': 'CODESTAB', 'SIGNIFICACAO': 'REGIME'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df4" até formar...
# uma "string" de tamanho = 7
df4['CODESTAB'] = df4['CODESTAB'].apply(lambda x: x.zfill(7))
# Realiza o "merge" da TCC ESFERA18 à TCC NAT_ORG
df5 = df3.append(df4, sort=False)
df6 = df5.replace(np.nan,'').groupby('CODESTAB',as_index=False).agg(''.join)
df6.sort_values(by=['CODESTAB'], inplace=True)
df6.reset_index(drop=True, inplace=True)
# Realiza o "merge" da TCC ESFERA18 (+ TCC NAT_ORG) à (Tabela CNESDO18 + TCC ESTAB06)
df7 = df.append(df6, sort=False)
df8 = df7.replace(np.nan,'').groupby('CODESTAB',as_index=False).agg(''.join)
df8.sort_values(by=['CODESTAB'], inplace=True)
df8.reset_index(drop=True, inplace=True)
# Substitui os valores de string vazia das colunas especificadas pela string "?"
df8['DESCESTAB'].replace('','?', inplace=True)
df8['ESFERA'].replace('','?', inplace=True)
df8['REGIME'].replace('','?', inplace=True)
# Upload do arquivo "xlsx" que contém os CODESTAB presentes nos arquivos DOXXaaaa (dos anos de...
# 1997 a 2017) e não presentes na tabela CNESDO18 e nas TCC ESTAB06, ESFERA18 e NAT_ORG. Ou seja,...
# isso parece ser uma falha dos dados do Datasus
dataframe = pd.read_excel(self.path + 'CODESTAB_OUT_CNESDO18_E_3TCC_ANOS_1997_2017' + '.xlsx')
# Converte a coluna "CODESTAB" do objeto "dataframe" de "int" para "string"
dataframe['CODESTAB'] = dataframe['CODESTAB'].astype('str')
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "dataframe" até formar...
# uma "string" de tamanho = 7
dataframe['CODESTAB'] = dataframe['CODESTAB'].apply(lambda x: x.zfill(7))
# Adiciona as colunas "DESCESTAB", "ESFERA" e "REGIME" e respectivos valores ao objeto "dataframe"
dataframe['DESCESTAB'] = ['NAO PROVIDO EM CNESDO18.DBF E NAS TCC ESTAB06/ESFERA18/NAT_ORG'] * (dataframe.shape[0])
dataframe['ESFERA'] = ['?'] * (dataframe.shape[0])
dataframe['REGIME'] = ['?'] * (dataframe.shape[0])
# Concatenação do objeto "dataframe" ao objeto "df8"
frames = []
frames.append(df8)
frames.append(dataframe)
dfinal = pd.concat(frames, ignore_index=True)
# Renomeia a coluna "CODESTAB"
dfinal.rename(index=str, columns={'CODESTAB': 'ID'}, inplace=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
dfinal.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena eventualmente as linhas por ordem crescente dos valores da coluna ID
dfinal.sort_values(by=['ID'], inplace=True)
# Reset eventualmente o index devido ao sorting prévio e à eventual eliminação de duplicates
dfinal.reset_index(drop=True, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
dfinal.loc[dfinal.shape[0]] = ['NA', 'NOT AVAILABLE', '?', '?']
return dfinal
# Função para adequar e formatar as colunas e valores da Tabela TPMORTEOCO ("constando" apenas...
# do Dicionário de Dados do SIM)
def get_TPMORTEOCO_treated(self):
# Conversão da Tabela TPMORTEOCO (em formato "xlsx") para um objeto pandas DataFrame
df = | pd.read_excel(self.path + 'TPMORTEOCO' + '.xlsx') | pandas.read_excel |
"""
Test for issue 72:
https://github.com/pandas-profiling/pandas-profiling/issues/72
"""
import numpy as np
import pandas as pd
import mars_profiling
from mars_profiling.config import config
from mars_profiling.model.base import Variable
def test_issue72_higher():
# Showcase (and test) different ways of interfacing with config/profiling report
config["vars"]["num"]["low_categorical_threshold"].set(2)
config["correlations"]["recoded"]["calculate"].set(False)
df = | pd.DataFrame({"A": [1, 2, 3, 3]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 15:37:25 2020
@author: guanhua
"""
import re
import pandas as pd
from datetime import datetime
def getRemainingLeaseYear(row):
r = row.split('\n')[0]
r = re.search('\d+', r).group()
return int(r)
def getRemainingLeaseMonth(row):
r = row.split('\n')
if len(r) > 1:
r = r[1]
r = re.search('\d+', r).group()
return int(r)
else:
return int(0)
def getFloorArea(row):
return float(row.split('\n')[0])
def getModelType(row):
try:
r = row.split('\n')[1]
return r
except:
return None
def cleanPrice(row):
r = re.sub('[^0-9]','',row)
return float(r.strip('$'))/100
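# Illustrative example (editor's note): cleanPrice strips every non-digit character
# and divides by 100, so a price shown with cents becomes a plain float in dollars:
# >>> cleanPrice('$500,000.00')
# 500000.0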
def getTownName(row):
r = row.split()[1:]
return ' '.join(r)
def preprocessHDBdf(hdb):
hdb['Street'] = hdb['Street'].str.strip()
# flat_type_conversion = {1:'1-Room',2:'2-Room',3:'3-Room',4:'4-Room',5:'5-Room',6:'Executive',8:'Multi-Generation'}
# hdb.replace({'Room Type': flat_type_conversion}, inplace=True)
hdb['Remaining Lease (Year)'] = hdb['Remaining Lease'].apply(getRemainingLeaseYear)
hdb['Remaining Lease (Month)'] = hdb['Remaining Lease'].apply(getRemainingLeaseMonth)
hdb['Remaining Lease in Months'] = hdb['Remaining Lease (Year)']*12 + hdb['Remaining Lease (Month)']
# hdb['Floor Area Sqm'] = hdb['Sqm'].apply(getFloorArea)
# hdb['Model Type'] = hdb['Sqm'].apply(getModelType)
hdb['Price'] = hdb['Price'].apply(cleanPrice)
hdb['Price'] = hdb['Price'].astype(float)
# hdb['Town Name'] = hdb['Town'].apply(getTownName)
SQM_to_SQFT = 10.764
hdb['Sqm'] = hdb['Floor Area'].copy()
hdb['Sqft'] = hdb['Sqm']*SQM_to_SQFT
hdb['Price per Sqm'] = hdb['Price']/hdb['Sqm']
hdb['Price per Sqft'] = hdb['Price']/hdb['Sqft']
hdb['Price per Sqft per Remaining Lease year'] = hdb['Price per Sqft']/hdb['Remaining Lease in Months']*12
hdb['Storey Range + Flat Type'] = hdb['Storey'] + ' ' + hdb['Flat Type']
hdb['Resale Registration Date'] = | pd.to_datetime(hdb['Resale Registration Date'], format='%b %Y') | pandas.to_datetime |
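# For reference (editor's note): the '%b %Y' format parses abbreviated-month strings, e.g.
# >>> pd.to_datetime('Mar 2021', format='%b %Y')
# Timestamp('2021-03-01 00:00:00')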
#! /usr/bin/env python3
import pandas as pd
import numpy as np
import glob
from datetime import datetime
from dateutil.parser import parse
from elasticsearch import Elasticsearch
def _load_vmstat(monitoring_data):
monitoring_data["timestamp"] = pd.to_datetime(monitoring_data["timestamp"]+ 3600, unit='s')
monitoring_data = monitoring_data.rename(columns={
"r": "processes", "b": "waiting", "swdp": "virtual mem", "free": "free",
"buff": "buffers", "si": "mem_on_disk", "so": "mem_to_disk", "bi": "blockIn",
"bo": "blockOut", "in": "interrupts", "cs": "switches", "us": "cpu_user",
"sy": "cpu_system", "id": "cpu_idle", "wa": "blocked"})
return monitoring_data
def load_vmstat(load_from_cache=False,store_cache_file=False,cache_file=None):
monitoring_data = None
if load_from_cache and cache_file is not None:
monitoring_data = pd.read_csv(cache_file)
else:
for file in glob.glob("vmstats/*"):
df = pd.read_csv(file, skiprows = 0,error_bad_lines=False)
if monitoring_data is None:
monitoring_data = df
else:
monitoring_data = pd.concat([monitoring_data, df], sort=True)
#clean up data
monitoring_data = _load_vmstat(monitoring_data)
if store_cache_file:
monitoring_data.to_csv(cache_file)
return monitoring_data
def load_elastic(load_from_cache=False,store_cache_file=False,cache_file=None,es=None,experiment_dates=[]):
monitoring_data = None
if load_from_cache and cache_file is not None:
monitoring_data = pd.read_csv(cache_file)
else:
monitoring_data = collect_monitoring_data(es,"*",experiment_dates)
if store_cache_file:
if monitoring_data is not None:
monitoring_data.to_csv(cache_file)
return monitoring_data
def load_rmstats():
monitoring_data = None
for file in glob.glob("rmstats/*.csv"):
df = pd.read_csv(file, skiprows = 0,error_bad_lines=False)
if monitoring_data is None:
monitoring_data = df
else:
monitoring_data = pd.concat([monitoring_data, df], sort=True)
return monitoring_data
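# Sketch of typical usage (editor's note; the file names are hypothetical and the
# cache file is only read/written when the corresponding flags are set):
# >>> vm = load_vmstat(store_cache_file=True, cache_file='vmstat_cache.csv')
# >>> experiments = load_experiment(store_cache_file=True, data_cache_file='experiments_cache.csv')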
def load_experiment(load_from_cache=False,store_cache_file=False,data_cache_file=None):
data = None
if load_from_cache and data_cache_file is not None:
data = pd.read_csv(data_cache_file)
else:
data = __load()
if store_cache_file:
data.to_csv(data_cache_file)
return data
def __load():
all = None
for file in glob.glob("data/*"):
names = file[5:-4].split("-")
experiment=names[0]
method=names[1]
timestamp=file[5+len(experiment)+1+len(method)+1:-4]
date=timestamp[:timestamp.index("T")]
date=datetime.strptime(date, '%Y-%m-%d')
timestamp=parse(timestamp)
df = pd.read_csv(file, skiprows = 0,error_bad_lines=False)
df['experiment']=experiment
df['method']=method
df['startTime']=pd.Timestamp(year=timestamp.year,month=timestamp.month, day=timestamp.day, hour=timestamp.hour, minute=timestamp.minute)
df['runDate']=pd.Timestamp(year=date.year,month=date.month, day=date.day)
if (all is None):
all = df
else:
all = pd.concat([all, df], sort=True)
return all
def __collect(es,index,query):
data = []
page = es.search(
index = index,
scroll = '2m',
size = 1000,
body = query)
if '_scroll_id' in page:
sid = page['_scroll_id']
scroll_size = page['hits']['total']
data = data + page['hits']['hits']
# Start scrolling
while (scroll_size > 0):
page = es.scroll(scroll_id = sid, scroll = '2m')
# Update the scroll ID
sid = page['_scroll_id']
# Get the number of results that we returned in the last scroll
scroll_size = len(page['hits']['hits'])
data = data + page['hits']['hits']
return data
else:
return data
def collect_monitoring_data(es,vdcname,dates=[]):
all = None
for runDate in dates:
esAll = []
index = "{}-{}".format(vdcname,runDate.date().strftime("%Y-%m-%d"))
print("loading data from index",index)
esAll = __collect(es,index,{"query": {"match_all": {}}})
if len(esAll) <= 0:
continue
esAll = list(map(lambda x:x["_source"],esAll))
responses = filter(lambda x:'response.code' in x,esAll)
requests = filter(lambda x:'response.code' not in x,esAll)
responses = | pd.DataFrame(responses) | pandas.DataFrame |
from pecos.utils import index_to_datetime
import matplotlib.pyplot as plt
import datetime as dt
from mhkit import qc
import pandas as pd
import numpy as np
_matlab = False # Private variable indicating if mhkit is run through matlab
def get_statistics(data,freq,period=600,vector_channels=[]):
"""
Calculate mean, max, min and stdev statistics of continuous data for a
given statistical window. Default length of statistical window (period) is
based on IEC TS 62600-3:2020 ED1. Also allows calculation of statistics for multiple statistical
windows of continuous data and accounts for vector/directional channels.
Parameters
------------
data : pandas DataFrame
Data indexed by datetime with columns of data to be analyzed
freq : float/int
Sample rate of data [Hz]
period : float/int
Statistical window of interest [sec], default = 600
vector_channels : string or list (optional)
List of vector/directional channel names formatted in deg (0-360)
Returns
---------
means,maxs,mins,stdevs : pandas DataFrame
Calculated statistical values from the data, indexed by the first timestamp
"""
# Check data type
assert isinstance(data, pd.DataFrame), 'data must be of type pd.DataFrame'
assert isinstance(freq, (float,int)), 'freq must be of type int or float'
assert isinstance(period, (float,int)), 'period must be of type int or float'
# catch if vector_channels is not an string array
if isinstance(vector_channels,str): vector_channels = [vector_channels]
assert isinstance(vector_channels, list), 'vector_channels must be a list of strings'
# Check timestamp using qc module
data.index = data.index.round('1ms')
dataQC = qc.check_timestamp(data,1/freq)
dataQC = dataQC['cleaned_data']
# Check to see if data length contains enough data points for statistical window
if len(dataQC)%(period*freq) > 0:
remain = len(dataQC) % (period*freq)
dataQC = dataQC.iloc[0:-int(remain)]
print('WARNING: there were not enough data points in the last statistical period. Last '+str(remain)+' points were removed.')
# Pre-allocate lists
time = []
means = []
maxs = []
mins = []
stdev = []
# Get data chunks to performs stats on
step = period*freq
for i in range(int(len(dataQC)/(period*freq))):
datachunk = dataQC.iloc[i*step:(i+1)*step]
# Check whether there are any NaNs in datachunk
if datachunk.isnull().any().any():
print('NaNs found in statistical window...check timestamps!')
input('Press <ENTER> to continue')
continue
else:
# Get stats
time.append(datachunk.index.values[0]) # time vector
maxs.append(datachunk.max()) # maxes
mins.append(datachunk.min()) # mins
means.append(datachunk.mean()) # means
stdev.append(datachunk.std()) # standard deviation
# calculate vector averages and std
for v in vector_channels:
vector_avg, vector_std = vector_statistics(datachunk[v])
means[i][v] = vector_avg # overwrite scalar average for channel
stdev[i][v] = vector_std # overwrite scalar std for channel
# Convert to DataFrames and set index
means = pd.DataFrame(means,index=time)
maxs = pd.DataFrame(maxs,index=time)
mins = pd.DataFrame(mins,index=time)
stdevs = | pd.DataFrame(stdev,index=time) | pandas.DataFrame |
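# Illustrative call (editor's note; 'loads' stands for a hypothetical DataFrame sampled
# at 50 Hz with a DatetimeIndex, and 'heading' for a directional channel in degrees):
# >>> means, maxs, mins, stdevs = get_statistics(loads, freq=50, period=600, vector_channels=['heading'])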
import scanpy as sc
import numpy as np
import scipy as sp
from skmisc.loess import loess
from statsmodels.stats.multitest import multipletests
from scipy.stats import rankdata
import pandas as pd
import time
def score_cell(data,
gene_list,
gene_weight=None,
suffix='',
ctrl_opt='mean_match',
trs_opt='vst',
bc_opt='empi',
ctrlgene_list=None,
n_ctrl=1,
n_genebin=200,
cov_list=None,
random_seed=0,
verbose=False,
copy=False,
return_list=['trs_ep', 'trs_ez']):
"""Score cells based on the trait gene set
Args
----
data (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list.
If gene_weight=None, the weigts are set to be one.
suffix : str
The name of the added cell-level annotations would be
['trs', 'trs_z', 'trs_tp', 'trs_ep', 'trs_ez']+suffix
ctrl_opt : str
Option for selecting the null geneset
None: not using a null geneset
'random': size matched random geneset
'mean_match' size-and-mean-matched random geneset
'mean_bvar_match': size-and-mean-and-bvar-matched random geneset. bvar means biological variance.
trs_opt : str
Option for computing TRS
'mean': average over the genes in the gene_list
'vst': weighted average with weights equal to 1/sqrt(technical_variance_of_logct)
'inv_std': weighted average with weights equal to 1/std
bc_opt : str
Option for cell-wise background correction
None: no correction.
'recipe_vision': normalize by cell-wise mean&var computed using all genes.
'empi': normalize by cell-wise mean&var stratified by mean bins.
ctrlgene_list (n_ctrl_gene) : list
List of control genes to use
n_ctrl : int
Number of control genesets
n_genebin : int
Number of gene bins (to divide the genes by expression)
Only useful when ctrl_opt is not None
cov_list : list of str
Covariates to control for.
The covariates are first centered and then regressed out.
Elements in cov_list should be present in data.obs.columns
random_seed : int
Random seed
copy : bool
If to make copy of the AnnData object
return_list : list
Items to return
Should be a subset of ['trs', 'trs_z', 'trs_tp', 'trs_ep', 'trs_ez']
Returns
-------
adata (n_cell, n_gene) : AnnData
Columns added to data.obs as specified by return_list
"""
np.random.seed(random_seed)
adata = data.copy() if copy else data
# Pre-compute statistics
var_set = set(['mean','var','var_tech'])
obs_set = set(['mean','var'])
if (len(var_set-set(adata.var.columns))>0) | (len(obs_set-set(adata.obs.columns))>0):
if verbose: print('# score_cell: recompute statistics using method.compute_stats')
compute_stats(adata)
# Check options
ctrl_opt_list = [None, 'given', 'random', 'mean_match', 'mean_bvar_match']
trs_opt_list = ['mean', 'vst', 'inv_std']
bc_opt_list = [None, 'recipe_vision', 'empi']
if ctrl_opt not in ctrl_opt_list:
raise ValueError('# score_cell: ctrl_opt not in [%s]'%', '.join([str(x) for x in ctrl_opt_list]))
if trs_opt not in trs_opt_list:
raise ValueError('# score_cell: trs_opt not in [%s]'%', '.join([str(x) for x in trs_opt_list]))
if bc_opt not in bc_opt_list:
raise ValueError('# score_cell: bc_opt not in [%s]'%', '.join([str(x) for x in bc_opt_list]))
if cov_list is not None:
temp_list = list(set(cov_list) - set(adata.obs.columns))
if len(temp_list)>0:
raise ValueError('# score_cell: covariates %s not in data.obs.columns'%','.join(temp_list))
if (len(cov_list)>0) & ('mean' not in cov_list):
raise ValueError('# score_cell: mean needs to be in cov_list')
if verbose:
print('# score_cell: suffix=%s, ctrl_opt=%s, trs_opt=%s, bc_opt=%s'%(suffix, ctrl_opt, trs_opt, bc_opt))
print('# score_cell: n_ctrl=%d, n_genebin=%d'%(n_ctrl, n_genebin))
# Gene-wise statistics
df_gene = pd.DataFrame(index=adata.var_names)
df_gene['gene'] = df_gene.index
df_gene['mean'] = adata.var['mean']
df_gene['var'] = adata.var['var'].values
df_gene['tvar'] = adata.var['var_tech'].values
df_gene['bvar'] = df_gene['var'].values - df_gene['tvar'].values
df_gene.drop_duplicates(subset='gene', inplace=True)
# Update gene_list
gene_list = list(gene_list)
n_gene_old = len(gene_list)
df_trait_gene = pd.DataFrame(index=gene_list, columns=['gene', 'gene_weight'], data=0)
df_trait_gene['gene'] = df_trait_gene.index
df_trait_gene['gene_weight'] = 1 if gene_weight is None else np.array(gene_weight)
df_trait_gene.drop_duplicates(subset='gene', inplace=True)
gene_list = list(set(df_gene['gene'].values) & set(gene_list))
gene_list.sort()
df_trait_gene = df_trait_gene.loc[gene_list].copy()
gene_weight = df_trait_gene['gene_weight'].values.copy()
if verbose:
print('# score_cell: %-15s %-15s %-20s'
%('trait geneset,', '%d/%d genes,'%(len(gene_list),n_gene_old),
'mean_exp=%0.2e'%df_gene.loc[gene_list, 'mean'].mean()))
# Select control genes: put all methods in _select_ctrl_geneset
dic_ctrl_list,dic_ctrl_weight = _select_ctrl_geneset(df_gene,
gene_list, gene_weight,
ctrl_opt, ctrlgene_list,
n_ctrl, n_genebin,
random_seed, verbose)
# Compute TRS: put all methods in _compute_trs
dic_trs = {}
dic_trs['trs'] = _compute_trs(adata, gene_list, gene_weight, trs_opt, cov_list=cov_list)
for i_list in dic_ctrl_list.keys():
dic_trs['trs_ctrl%d'%i_list] = _compute_trs(adata,
dic_ctrl_list[i_list],
dic_ctrl_weight[i_list],
trs_opt, cov_list=cov_list)
# Correct cell-specific and geneset-specific background: put all methods in _correct_background
_correct_background(adata, dic_trs, bc_opt)
# Get p-value
if 'trs_tp' in return_list:
dic_trs['trs_tp'] = 1 - sp.stats.norm.cdf(dic_trs['trs_z'])
if len(dic_ctrl_list.keys())>0:
v_ctrl_trs_z = []
for i_list in dic_ctrl_list.keys():
v_ctrl_trs_z += list(dic_trs['trs_ctrl%d_z'%i_list])
dic_trs['trs_ep'] = get_p_from_empi_null(dic_trs['trs_z'], v_ctrl_trs_z)
if 'trs_ez' in return_list:
dic_trs['trs_ez'] = -sp.stats.norm.ppf(dic_trs['trs_ep'])
dic_trs['trs_ez'] = dic_trs['trs_ez'].clip(min=-10,max=10)
for term in return_list:
if term in dic_trs.keys():
adata.obs['%s%s'%(term,suffix)] = dic_trs[term].copy()
else:
print('# score_cell: %s not computed'%term)
return adata if copy else None
def _select_ctrl_geneset(input_df_gene, gene_list, gene_weight,
ctrl_opt, ctrlgene_list,
n_ctrl, n_genebin, random_seed, verbose):
"""Subroutine for score_cell, select control genesets
Args
----
input_df_gene (adata.shape[1], n_statistic) : pd.DataFrame
Gene-wise statistics
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list.
ctrl_opt : str
Option for selecting the null geneset
None: not using a null geneset
'random': size matched random geneset
'mean_match' size-and-mean-matched random geneset
'mean_bvar_match': size-and-mean-and-bvar-matched random geneset. bvar means biological variance.
ctrlgene_list (n_ctrl_gene) : list
List of control genes to use
n_ctrl : int
Number of control genesets
n_genebin : int
Number of gene bins (to divide the genes by expression)
Only useful when ctrl_opt is not None
random_seed : int
Random seed
Returns
-------
dic_ctrl_list : dictionary
dic_ctrl_list[i]: the i-th control gene list (a list)
dic_ctrl_weight : dictionary
dic_ctrl_weight[i]: weights for the i-th control gene list (a list)
"""
np.random.seed(random_seed)
df_gene = input_df_gene.copy()
gene_list = list(gene_list)
df_trait_gene = pd.DataFrame(index=gene_list, columns=['gene', 'gene_weight'], data=0)
df_trait_gene['gene'] = df_trait_gene.index
df_trait_gene['gene_weight'] = list(gene_weight)
dic_ctrl_list = {}
dic_ctrl_weight = {}
if ctrl_opt=='given':
dic_ctrl_list[0] = ctrlgene_list
dic_ctrl_weight[0] = np.ones(len(ctrlgene_list))
if ctrl_opt=='random':
for i_list in np.arange(n_ctrl):
ind_select = np.random.permutation(df_gene.shape[0])[:len(gene_list)]
dic_ctrl_list[i_list] = list(df_gene['gene'].values[ind_select])
dic_ctrl_weight[i_list] = df_trait_gene['gene_weight'].values.copy()
if ctrl_opt=='mean_match':
# Divide genes into bins based on their rank of mean expression
df_gene['qbin'] = pd.qcut(df_gene['mean'], q=n_genebin, labels=False)
df_gene_bin = df_gene.groupby('qbin').agg({'gene':set})
gene_list_as_set = set(gene_list)
for i_list in np.arange(n_ctrl):
dic_ctrl_list[i_list] = []
dic_ctrl_weight[i_list] = []
for bin_ in df_gene_bin.index:
temp_overlap_list = list(df_gene_bin.loc[bin_,'gene'] & gene_list_as_set)
temp_overlap_list.sort()
n_gene_in_bin = len(temp_overlap_list)
if n_gene_in_bin>0:
temp_list = list(df_gene_bin.loc[bin_, 'gene'])
temp_list.sort()
v_gene_bin = np.array(temp_list)
ind_select = np.random.permutation(v_gene_bin.shape[0])[0:n_gene_in_bin]
dic_ctrl_list[i_list] += list(v_gene_bin[ind_select])
dic_ctrl_weight[i_list] += list(df_trait_gene.loc[temp_overlap_list,'gene_weight'].values)
if ctrl_opt=='mean_bvar_match':
# Divide genes into bins based on their rank of mean expression and biological variance
n_qbin = int(np.ceil(np.sqrt(n_genebin)))
df_gene['mean_qbin'] = | pd.qcut(df_gene['mean'], q=n_qbin, labels=False) | pandas.qcut |
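# For reference (editor's note): pd.qcut with labels=False returns the integer
# quantile-bin index of each element, e.g.
# >>> pd.qcut(pd.Series([1, 2, 3, 4]), q=2, labels=False).tolist()
# [0, 0, 1, 1]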
# Definition of data structures for BDNE project
# Author : <NAME> <<EMAIL>>
# Imports for type-annotations
from __future__ import annotations
from typing import Tuple, List, Dict, Union, Callable, Any
# Import random to sample from the sets
import random
# Import numpy as part of the type-annotations
import numpy as np
# Import pandas to output the data as a dataframe
import pandas as pd
# Import Mapping to implement the experimental metadata as a mapping class
from collections.abc import Mapping
# Import the core BDNE ORM and configuration to deal with the database
import BDNE.db_orm as db
import BDNE.config as cfg
from BDNE.config import db_batch_size
#################################################################
# A cache class for storing data locally
#################################################################
class DBCache:
"""A basic cache class for database IDs- never kicks out old data unless told to"""
# Store the data in pd.DataFrame
_cache: pd.DataFrame
def __init__(self) -> None:
"""Set up pandas dataframe to store data internally"""
self._cache = pd.DataFrame()
def clear(self) -> None:
"""Empty the cache"""
self._cache = pd.DataFrame()
def __call__(self, ids: List[int]) -> Tuple[List[int], pd.DataFrame]:
"""Convenience function to retrieve a list of results"""
return self.check(ids)
def check(self, ids: List[int]) -> Tuple[List[int], pd.DataFrame]:
"""Look for hits with supplied ids, must be unique index"""
if len(ids) == 0:
            return [], pd.DataFrame()
"""Genetic evaluation of individuals."""
import os
import sys
# import time
from collections import Counter
from itertools import compress
from numba import njit
import pkg_resources
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
def example_data():
"""Provide data to the package."""
cwd = os.getcwd()
stream = pkg_resources.resource_stream(__name__, 'data/chr.txt')
chrmosomedata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/group.txt')
groupdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/effects.txt')
markereffdata = pd.read_table(stream, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/phase.txt')
genodata = pd.read_table(stream, header=None, sep=" ")
stream = pkg_resources.resource_stream(__name__, 'data/ped.txt')
ped = pd.read_table(stream, header=None, sep=" ")
os.chdir(cwd)
return chrmosomedata, markereffdata, genodata, groupdata, ped
if __name__ == "__main__":
example_data()
@njit
def fnrep2(gen, aaxx, aaxx1):
"""Code phased genotypes into 1, 2, 3 and 4."""
qqq = np.empty((int(gen.shape[0]/2), gen.shape[1]), np.int_)
for i in range(qqq.shape[0]):
for j in range(qqq.shape[1]):
if gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx:
qqq[i, j] = 1
elif gen[2*i, j] == aaxx1 and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 2
elif gen[2*i, j] == aaxx and gen[2*i+1, j] == aaxx1:
qqq[i, j] = 3
else:
qqq[i, j] = 4
return qqq
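# Coding produced by fnrep2 for each individual/marker pair (as called from haptogen
# below, where aaxx is the major and aaxx1 the minor allele):
#   1 = homozygous major, 2 = homozygous minor,
#   3 = heterozygous with the major allele on the first haplotype,
#   4 = the remaining heterozygous phase.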
def haptogen(gen, progress=False):
"""Convert haplotypes to coded genotypes."""
if progress:
print("Converting phased haplotypes to genotypes")
if gen.shape[1] == 2:
gen = np.array(gen.iloc[:, 1]) # del col containing ID
# convert string to 2D array of integers
gen = [list(gen[i].rstrip()) for i in range(gen.shape[0])]
gen = np.array(gen, int)
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
elif gen.shape[1] > 2:
gen = gen.iloc[:, 1:gen.shape[1]] # del col containing ID
# derives the frequency of alleles to determine the major allele
allele = np.asarray(np.unique(gen, return_counts=True)).T.astype(int)
if len(allele[:, 0]) != 2:
sys.exit("method only supports biallelic markers")
aaxx = allele[:, 0][np.argmax(allele[:, 1])] # major allele
aaasns = np.isin(allele[:, 0], aaxx, invert=True)
aaxx1 = int(allele[:, 0][aaasns]) # minor allele
gen = np.array(gen, int)
gen = fnrep2(gen, aaxx, aaxx1)
return gen
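# Illustrative sketch (never called): converting the bundled phased-haplotype data
# into coded genotypes. The layout of the phase file is assumed to match what
# example_data() returns above.
def _example_haptogen():
    _, _, genodata, _, _ = example_data()
    return haptogen(genodata, progress=True)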
class Datacheck:
"""Check the input data for errors and store relevant info as an object."""
def __init__(self, gmap, meff, gmat, group, indwt, progress=False):
"""
Check input data for errors and store relevant info as class object.
Parameters
----------
gmap : pandas.DataFrame
Index: RangeIndex
Columns:
Name: CHR, dtype: int64; chromosome number
Name: SNPName, dtype: object; marker name
Name: Position: dtype: int64; marker position in bp
Name: group: dtype: float64; marker distance (cM) or reco rates
meff : pandas.DataFrame
Index: RangeIndex
Columns:
Name: trait names: float64; no. of columns = no of traits
gmat : pandas.DataFrame
Index: RangeIndex
Columns:
Name: ID, dtype: int64 or str; identification of individuals
Name: haplotypes, dtype: object; must be biallelic
group : pandas.DataFrame
Index: RangeIndex
Columns:
Name: group, dtype: object; group code of individuals, e.g., M, F
Name: ID, dtype: int64 or str; identification of individuals
indwt : list of index weights for each trait
progress : bool, optional; print progress of the function if True
Returns stored input files
-------
"""
# check: ensures number of traits match size of index weights
indwt = np.array(indwt)
if (meff.shape[1]-1) != indwt.size:
sys.exit('no. of index weights do not match marker effects cols')
# check: ensure individuals' genotypes match group and ID info
id_indgrp = pd.Series(group.iloc[:, 1]).astype(str) # no of inds
if not pd.Series(
pd.unique(gmat.iloc[:, 0])).astype(str).equals(id_indgrp):
sys.exit("ID of individuals in group & genotypic data don't match")
# check: ensure marker names in marker map and effects match
if not (gmap.iloc[:, 1].astype(str)).equals(meff.iloc[:, 0].astype(str)):
print("Discrepancies between marker names")
sys.exit("Check genetic map and marker effects")
# check: ensure marker or allele sub effect are all numeric
meff = meff.iloc[:, 1:meff.shape[1]]
test = meff.apply(
lambda s: pd.to_numeric(s, errors='coerce').notnull().all())
if not test.all():
sys.exit("Marker or allele sub effects contain non-numeric values")
# check: ensure unique maps match no of groups if map more than 1
grpg = pd.unique(group.iloc[:, 0]) # groups of individuals
grp_chrom = gmap.shape[1]-3 # no of unique maps
gmat = haptogen(gmat, progress)
if grp_chrom > 1 and grp_chrom != grpg.size:
sys.exit("no. of unique maps does not match no. of groups")
# check no of markers in genotype and map and marker effects match
no_markers = gmap.shape[0] # no of markers
if no_markers != gmat.shape[1] or no_markers != meff.shape[0]:
sys.exit("markers nos in gen, chrm or marker effects don't match")
# check: ordered marker distance or recombination rates
for grn in range(grp_chrom):
for chrm in pd.unique(gmap.iloc[:, 0]):
mpx = np.array(gmap.iloc[:, 3+grn][gmap.iloc[:, 0] == chrm])
                if not (mpx == np.sort(mpx)).all():
sys.exit(
f"Faulty marker map on chr {chrm} for grp {grpg[grn]}")
if progress:
print('Data passed the test!')
print("Number of individuals: ", len(id_indgrp))
print("Number of groups: ", len(grpg), ": ", grpg)
print("Number of specific maps:", grp_chrom)
print("Number of chromosomes: ", len(pd.unique(gmap.iloc[:, 0])))
print("Total no. markers: ", no_markers)
print("Number of trait(s): ", meff.columns.size)
print("Trait name(s) and Index weight(s)")
if meff.columns.size == 1:
print(meff.columns[0], ": ", indwt[0])
elif meff.columns.size > 1:
for i in range(meff.columns.size):
print(meff.columns[i], ": ", indwt[i])
self.gmap = gmap
self.meff = meff
self.gmat = gmat
self.group = group
self.indwt = indwt
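# Illustrative sketch (never called): building a Datacheck object from the bundled
# example files. The equal index weights are an assumption for illustration only.
def _example_datacheck():
    gmap, meff, gmat, group, _ped = example_data()
    indwt = [1.0] * (meff.shape[1] - 1)  # one (assumed) weight per trait column
    return Datacheck(gmap, meff, gmat, group, indwt, progress=True)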
def elem_cor(mylist, mprc, ngp, mposunit, method, chrm):
"""Derive pop cov matrix."""
if method == 1: # Bonk et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = np.exp(-2*(np.abs(mprc - mprc[:, None])/100))/4
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (1-(2*mprc))/4
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
elif method == 2: # Santos et al's approach
if mposunit in ("cM", "cm", "CM", "Cm"):
tmp = (-1*(np.abs(mprc - mprc[:, None])/200))+0.25
cutoff = (-1*(50/200))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
elif mposunit in ("reco", "RECO"):
if mprc[0] != 0:
sys.exit(f"First value for reco rate on chr {chrm} isn't zero")
aaa = (-1*(mprc/2))+0.25
ida = np.arange(aaa.size)
tmp = aaa[np.abs(ida - ida[:, None])]
cutoff = (-1*(0.5/2))+0.25
tmp = np.where(tmp < cutoff, 0, tmp)
# append chromosome-specific covariance matrix to list
mylist[int(ngp)].append(tmp)
return mylist
def popcovmat(info, mposunit, method):
"""
Derive population-specific covariance matrices.
Parameters
----------
info : class object
A class object created using the function "datacheck"
mposunit : string
        A string containing "cM" or "reco".
method : int
An integer with a value of 1 for Bonk et al.'s approach or
        2 for Santos et al.'s approach
Returns
-------
mylist : list
A list containing group-specific pop covariance matrices for each chr.
"""
if mposunit not in ("cM", "cm", "CM", "Cm", "reco", "RECO"):
sys.exit("marker unit should be either cM or reco")
# unique group name for naming the list if map is more than 1
probn = pd.unique(info.group.iloc[:, 0].astype(str)).tolist()
chromos = pd.unique(info.gmap.iloc[:, 0]) # chromosomes
no_grp = info.gmap.shape[1]-3 # no of maps
mylist = [] # list stores chromosome-wise covariance matrix
for ngp in range(no_grp):
mylist.append([])
# marker position in cM or recombination rates
grouprecodist = info.gmap.iloc[:, 3+ngp]
for chrm in chromos:
mpo = np.array(grouprecodist[info.gmap.iloc[:, 0] == (chrm)])
elem_cor(mylist, mpo, ngp, mposunit, method, chrm)
if no_grp > 1:
# if map is more than one, name list using group names
mylist = dict(zip(probn, mylist))
return mylist
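# Illustrative sketch (never called): population covariance matrices with Bonk et
# al.'s approach (method=1), assuming the map column stores positions in cM.
def _example_popcovmat(info):
    return popcovmat(info, mposunit="cM", method=1)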
@njit
def makemems(gmat, meff):
"""Set up family-specific marker effects (Mendelian sampling)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 4:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 3:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
@njit
def makemebv(gmat, meff):
"""Set up family-specific marker effects (GEBV)."""
qqq = np.zeros((gmat.shape))
for i in range(gmat.shape[0]):
for j in range(gmat.shape[1]):
if gmat[i, j] == 2:
qqq[i, j] = meff[j]*-1
elif gmat[i, j] == 1:
qqq[i, j] = meff[j]
else:
qqq[i, j] = 0
return qqq
def traitspecmatrices(gmat, meff):
"""Store trait-specific matrices in a list."""
notr = meff.shape[1] # number of traits
slist = [] # list stores trait-specific matrices
slist.append([])
for i in range(notr):
# specify data type for numba
mefff = np.array(meff.iloc[:, i], float)
matrix_ms = makemems(gmat, mefff)
slist[0].append(matrix_ms)
return slist
def namesdf(notr, trait_names):
"""Create names of dataframe columns for Mendelian co(var)."""
tnn = np.zeros((notr, notr), 'U20')
tnn = np.chararray(tnn.shape, itemsize=30)
for i in range(notr):
for trt in range(notr):
if i == trt:
tnn[i, trt] = str(trait_names[i])
elif i != trt:
tnn[i, trt] = "{}_{}".format(trait_names[i], trait_names[trt])
colnam = tnn[np.tril_indices(notr)]
return colnam
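# Example of the labels produced: for trait_names = ["T1", "T2"], namesdf(2, ...) gives
# the lower-triangle labels [b'T1', b'T2_T1', b'T2'] as bytes; callers decode them
# with .decode('utf-8').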
def mrmmult(temp, covmat):
"""Matrix multiplication (MRM' or m'Rm)."""
return temp @ covmat @ temp.T
def dgmrm(temp, covmat):
"""Matrix multiplication (MRM') for bigger matrices."""
temp1111 = scipy.linalg.blas.dgemm(alpha=1.0, a=temp, b=covmat)
return scipy.linalg.blas.dgemm(alpha=1.0, a=temp1111, b=temp.T)
def progr(itern, total):
"""Print progress of a task."""
fill, printend, prefix, suffix = '█', "\r", 'Progress:', 'Complete'
deci, length = 0, 50
percent = ("{0:." + str(deci) + "f}").format(100 * (itern / float(total)))
filledlen = int(length * itern // total)
bars = fill * filledlen + '-' * (length - filledlen)
print(f'\r{prefix} |{bars}| {percent}% {suffix}', end=printend)
if itern == total:
print()
def subindcheck(info, sub_id):
"""Check if inds provided in pd.DataFrame (sub_id) are in group data."""
sub_id = pd.DataFrame(sub_id).reset_index(drop=True)
if sub_id.shape[1] != 1:
sys.exit("Individuals' IDs (sub_id) should be provided in one column")
numbers = info.group.iloc[:, 1].astype(str).tolist()
sub_id = sub_id.squeeze().astype(str).tolist()
aaa = [numbers.index(x) if x in numbers else None for x in sub_id]
aaa = np.array(aaa)
if len(aaa) != len(sub_id):
sys.exit("Some individual ID could not be found in group data")
return aaa
def msvarcov_g_st(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for single trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
msvmsc = np.empty((matsub.shape[0], 1))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr, notr)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
        msvmsc[i, 0] = mscov[0, 0]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
msvmsc.columns = info.meff.columns
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g_mt(info, covmat, sub_id, progress=False):
"""Derive Mendelian sampling co(variance) for multiple traits."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
if (info.gmap.shape[1]-3 == 1 and len(pd.unique(groupsex)) > 1):
print("The same map will be used for all groups")
if progress:
progr(0, matsub.shape[0]) # print progress bar
snpindexxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
notr = info.meff.columns.size
slist = traitspecmatrices(matsub, info.meff)
# dataframe to save Mendelian sampling (co)variance and aggregate breeding
mad = len(np.zeros((notr+1, notr+1))[np.tril_indices(notr+1)])
msvmsc = np.empty((matsub.shape[0], mad))
for i in range(matsub.shape[0]): # loop over no of individuals
mscov = np.zeros((notr+1, notr+1)) # Mendelian co(var) mat for ind i
for chrm in pd.unique(info.gmap.iloc[:, 0]):
# snp index for chromosome chrm
s_ind = np.array(snpindexxx[info.gmap.iloc[:, 0] == (chrm)])
# family-specific marker effects for ind i
temp = np.zeros((notr+1, len(s_ind)))
for trt in range(notr):
temp[trt, :] = slist[0][trt][i, s_ind]
temp[notr, :] = np.matmul(info.indwt.T, temp[0:notr, :])
if info.gmap.shape[1]-3 == 1:
mscov = mscov + mrmmult(temp, covmat[0][chrm-1])
else:
mscov = mscov + mrmmult(temp, covmat[groupsex[i]][chrm-1])
msvmsc[i, :] = mscov[np.tril_indices(notr+1)]
if progress:
progr(i + 1, matsub.shape[0]) # print progress bar
msvmsc = pd.DataFrame(msvmsc)
tnames = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, tnames).decode('utf-8')
msvmsc.columns = colnam
msvmsc.insert(0, "ID", idn, True)
msvmsc.insert(1, "Group", groupsex, True) # insert group
return msvmsc
def msvarcov_g(info, covmat, sub_id, progress=False):
"""
Derive Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
progress : bool, optional; print progress of the function if True
Returns
-------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
Note: If sub_id is None, Mendelian (co-)variance will be estimated for
all individuals. Otherwise, Mendelian (co-)variance will be estimated for
the individuals in sub_id
"""
notr = info.meff.columns.size
if notr == 1:
msvmsc = msvarcov_g_st(info, covmat, sub_id, progress)
elif notr > 1:
msvmsc = msvarcov_g_mt(info, covmat, sub_id, progress)
return msvmsc
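# Illustrative sketch (never called): Mendelian sampling (co)variances for all
# individuals, reusing the hypothetical helpers sketched above.
def _example_msvarcov():
    info = _example_datacheck()
    covmat = popcovmat(info, mposunit="cM", method=1)
    return msvarcov_g(info, covmat, sub_id=None, progress=True)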
def array2sym(array):
"""Convert array to stdized symm mat, and back to array without diags."""
dfmsize = array.size
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
iii, jjj = np.tril_indices(notr)
mat = np.empty((notr, notr), float)
mat[iii, jjj], mat[jjj, iii] = array, array
mat = np.array(mat)
mat1 = cov2corr(mat)
return np.array(mat1[np.tril_indices(notr, k=-1)])
def msvarcov_gcorr(msvmsc):
"""
Standardize Mendelian sampling co(variance) and aggregate genotype.
Parameters
----------
msvmsc : pandas.DataFrame
containing the Mendelian sampling (co)variance and aggregate genotype
created using msvarcov_g function
Returns
-------
dfcor : pandas.DataFrame
containing standardized Mendelian sampling (co)variance
"""
if msvmsc.columns.size == 3:
sys.exit("Correlation cannot be derived for a single trait")
dfm = msvmsc.iloc[:, 2:msvmsc.shape[1]] # exclude ID and group
dfmsize = dfm.shape[1]
# derive number of traits
for notr in range(1, 10000):
if dfmsize == len(np.zeros((notr, notr))[np.tril_indices(notr)]):
break
# standardize covariance between traits
dfcor = dfm.apply(array2sym, axis=1)
# extract column names
listnames = dfm.columns.tolist()
cnames = [x for x in listnames if "_" in x]
# convert pd.series of list to data frame
dfcor = pd.DataFrame.from_dict(dict(zip(dfcor.index, dfcor.values))).T
dfcor.columns = cnames
# insert ID and group info
dfcor = [pd.DataFrame(msvmsc.iloc[:, 0:2]), dfcor] # add ID and GRP
dfcor = pd.concat(dfcor, axis=1)
return dfcor
def calcgbv(info, sub_id):
"""Calculate breeding values for each trait."""
if sub_id is not None:
aaa = subindcheck(info, sub_id)
idn = info.group.iloc[aaa, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[aaa, 0].reset_index(drop=True).astype(str)
matsub = info.gmat[aaa, :]
else:
idn = info.group.iloc[:, 1].reset_index(drop=True).astype(str) # ID
groupsex = info.group.iloc[:, 0].reset_index(drop=True).astype(str)
matsub = info.gmat
no_individuals = matsub.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float) # type spec for numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, 0] = matrix_me.sum(axis=1) # sum all effects
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float) # type spec 4 numba
matrix_me = makemebv(matsub, mefff) # fam-spec marker effects BV
gbv[:, i] = matrix_me.sum(axis=1) # sum all effects for each trait
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i] # Agg gen
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "ID", idn, True) # insert ID
gbv.insert(1, "Group", groupsex, True) # insert group
return gbv
def calcprob(info, msvmsc, thresh):
"""Calculate the probability of breeding top individuals."""
aaa = subindcheck(info, pd.DataFrame(msvmsc.iloc[:, 0]))
gbvall = calcgbv(info, None) # calc GEBV for all inds used by thresh
gbv = gbvall.iloc[aaa, :].reset_index(drop=True) # GEBV matching msvmsc
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size # number of traits
if notr == 1:
probdf = np.zeros((no_individuals, notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh) # threshold
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+2)], scale=np.sqrt(msvmsc.iloc[:, 0+2]))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh) # threshold
probdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh) # threshold
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+2)], scale=np.sqrt(
msvmsc.iloc[:, (t_ind[i])+2]))
probdf[:, i] = np.nan_to_num(probdf[:, i]) # convert Inf to zero
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+2)], scale=np.sqrt(
msvmsc["AG"]))
probdf[:, notr] = np.nan_to_num(probdf[:, notr]) # Agg
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = [pd.DataFrame(gbv.iloc[:, 0:2]), probdf] # add ID and GRP
probdf = pd.concat(probdf, axis=1)
return probdf
def calcindex(info, msvmsc, const):
"""Calculate the index if constant is known."""
sub_id = pd.DataFrame(msvmsc.iloc[:, 0])
gbv = calcgbv(info, sub_id) # calc GEBV
no_individuals = gbv.shape[0] # Number of individuals
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((no_individuals, notr))
indexdf[:, 0] = (gbv.iloc[:, (0+2)]/2) + np.sqrt(
msvmsc.iloc[:, 0+2])*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((info.meff.columns, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((no_individuals, notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
indexdf[:, i] = (gbv.iloc[:, (i+2)]/2) + np.sqrt(
msvmsc.iloc[:, (t_ind[i]+2)])*const
indexdf[:, notr] = (gbv.iloc[:, (notr+2)]/2) + np.sqrt(
msvmsc["AG"])*const
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = [pd.DataFrame(gbv.iloc[:, 0:2]), indexdf] # add ID and GRP
indexdf = pd.concat(indexdf, axis=1)
return indexdf
def selstrat_g(selstrat, info, sub_id, msvmsc, throrconst):
"""
    Calculate selection criteria (GEBV, PBTI, or index) using the gametic approach.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
top 5% of GEBV. If selstrat is index, throrconst is a constant.
If selstrat is GEBV, throrconst can be any random value.
Returns
-------
data : pandas.DataFrame
Index: RangeIndex
Columns:
ID, Group, trait names and Aggregate Breeding Value (ABV)
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_id is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
criterion will be estimated for all individuals in msvmsc data frame.
"""
if selstrat in ("PBTI", "pbti", "index", "INDEX") and msvmsc is None:
sys.exit("Provide Mendelian (co-)variance dataframe: 'msvmsc'")
if selstrat in ("PBTI", "pbti", "index", "INDEX") and throrconst is None:
sys.exit("Provide value for throrconst parameter")
if selstrat not in ('GEBV', 'gebv', 'PBTI', 'pbti', 'index', 'INDEX'):
sys.exit("selection strategy should be one of GEBV, PBTI or INDEX")
if selstrat in ('GEBV', 'gebv'):
data = calcgbv(info, sub_id)
elif selstrat in ('PBTI', 'pbti'):
if throrconst > 1 or throrconst < 0:
sys.exit("value must be in the range of 0 and 1")
data = calcprob(info, msvmsc, throrconst)
elif selstrat in ('index', 'INDEX'):
data = calcindex(info, msvmsc, throrconst)
return data
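# Illustrative sketch (never called): the three selection criteria. The 0.05
# threshold and the constant 2.0 are arbitrary illustration values.
def _example_selstrat_g(info, msvmsc):
    gebv = selstrat_g("GEBV", info, None, None, None)
    pbti = selstrat_g("PBTI", info, None, msvmsc, 0.05)
    index = selstrat_g("index", info, None, msvmsc, 2.0)
    return gebv, pbti, index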
def cov2corr(cov):
"""Convert covariance to correlation matrix."""
cov = np.asanyarray(cov)
std_ = np.sqrt(np.diag(cov))
with np.errstate(invalid='ignore'):
corr = cov / np.outer(std_, std_)
return corr
def aggen(us_ind, no_markers, slst, indwt):
"""Set up additive effects matrix of aggregate genotype."""
mmfinal = np.empty((len(us_ind), no_markers))
xxx = 0
for iii in us_ind:
tmpmt1 = np.array([slst[0][trt][iii, :] for trt in range(indwt.size)])
mmfinal[xxx, :] = np.matmul(indwt.transpose(), tmpmt1)
xxx = xxx + 1
return mmfinal
def chr_int(xxxxx):
"""Format chromomosome of interest parameter."""
if 'all' in xxxxx:
xxxxx = 'all'
elif 'none' in xxxxx:
xxxxx = 'none'
else:
xxxxx = np.array([int(i) for i in xxxxx])
return xxxxx
def writechr(covtmpx, chrinterest, chrm, trtnam, probx, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {} grp {}.npy".format(
os.getcwd(), trtnam, chrm, probx) # output file
np.save(chrfilec, cov2corr(covtmpx))
def writechrunspec(covtmpx, chrinterest, chrm, trtnam, stdsim):
"""Write matrices to file."""
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm)
np.save(chrfile1, covtmpx)
elif chrm in chrinterest:
chrfile1 = "{}/Sim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfile1, covtmpx)
if stdsim:
if isinstance(chrinterest, str):
if chrinterest == 'all':
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
elif chrm in chrinterest:
chrfilec = "{}/Stdsim mat for {} chrm {}.npy".format(
os.getcwd(), trtnam, chrm) # output file
np.save(chrfilec, cov2corr(covtmpx))
def grtonum(numnx):
"""Map chracters to numeric (0-no of groups)."""
numnx = numnx.reset_index(drop=True)
probn = pd.unique(numnx).tolist()
alt_no = np.arange(0, len(probn), 1)
noli = numnx.tolist()
numnx = np.array(list(map(dict(zip(probn, alt_no)).get, noli, noli)))
return numnx, probn
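# Example: grtonum(pd.Series(['M', 'F', 'M'])) returns (array([0, 1, 0]), ['M', 'F']).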
def datret(info, rw_nms, pfnp, us_ind, slist, covmat, cov_indxx, stdsim,
progress):
"""Return sim mat based on aggregate genotypes."""
snpindexxxx = np.arange(start=0, stop=info.gmap.shape[0], step=1)
if info.meff.shape[1] == 1 and not stdsim:
mat = cov_indxx
elif info.meff.shape[1] == 1 and stdsim:
mat = cov2corr(cov_indxx)
elif info.meff.shape[1] > 1:
if info.gmap.shape[1]-3 > 1:
rw_nms = pd.DataFrame(rw_nms)
rw_nms.to_csv(f"order of inds in mat grp {pfnp}.csv", index=False)
if progress:
print('Creating similarity matrix based on aggregate genotype')
progr(0, max(pd.unique(info.gmap.iloc[:, 0])))
tmpmt1 = aggen(us_ind, info.gmap.shape[0], slist, info.indwt)
# stores ABV covariance btw inds
mat = np.zeros((len(us_ind), len(us_ind)))
        # loop over chromosomes
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(snpindexxxx[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[0][chrm-1]))
else:
covtmpx = abs(dgmrm(tmpmt1[:, s_ind], covmat[pfnp][chrm-1]))
mat = mat + covtmpx
if progress:
progr(chrm, max(pd.unique(info.gmap.iloc[:, 0])))
if stdsim:
mat = cov2corr(mat)
return mat
def mrmcals(info, us_ind, stdsim, slist, covmat, probn, chrinterest, save,
progress):
"""Compute similarity matrix for each chromosome."""
if progress:
progr(0, info.meff.columns.size)
for i in range(info.meff.columns.size):
cov_indxx = np.zeros((len(us_ind), len(us_ind)))
for chrm in pd.unique(info.gmap.iloc[:, 0]):
s_ind = np.array(np.arange(0, info.gmap.shape[0], 1
)[info.gmap.iloc[:, 0] == (chrm)])
if info.gmap.shape[1]-3 == 1: # map is 1
covtmpx = abs(dgmrm(slist[0][i][:, s_ind], covmat[0][chrm-1]))
else: # if map is more than 1
covtmpx = abs(dgmrm(slist[0][i][us_ind[:, None], s_ind],
covmat[probn][chrm-1]))
cov_indxx = cov_indxx + covtmpx # sums up chrm-specific sims
if len(pd.unique(info.group.iloc[:, 0].astype(str))) == 1:
writechrunspec(covtmpx, chrinterest, chrm,
info.meff.columns[i], stdsim)
else:
writechr(covtmpx, chrinterest, chrm, info.meff.columns[i],
probn, stdsim) # write sim to file
if stdsim:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Stdsim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Stdsim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov2corr(cov_indxx)) # write std sim mats
else:
if save is True:
if info.gmap.shape[1]-3 == 1:
covxfile = "{}/Sim mat for {}.npy".format(
os.getcwd(), info.meff.columns[i])
else:
covxfile = "{}/Sim mat for {} grp {}.npy".format(
os.getcwd(), info.meff.columns[i], probn)
np.save(covxfile, cov_indxx) # write sim matrices
if progress:
progr(i + 1, info.meff.columns.size)
return cov_indxx
def simmat_g(info, covmat, sub_id, chrinterest, save=False, stdsim=False,
progress=False):
"""
Compute similarity matrices using gametic approach.
Parameters
----------
info : class object
A class object created using the function "datacheck"
covmat : A list of pop cov matrices created using "popcovmat" function
sub_id : pandas.DataFrame with one column
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated
chrinterest : str or list of int
list of chromosome numbers of interest or str with "all" or "none"
save : bool, optional; write trait-specific sim mats to file if true
    stdsim : bool, optional; write standardized similarity matrices to file if true
progress : bool, optional; print progress of the task if true
Returns
-------
    multgrpcov : list containing similarity matrices for each group
"""
if sub_id is None:
inda = np.arange(0, info.gmat.shape[0], 1)
sub_id = pd.DataFrame(info.group.iloc[inda, 1])
aaa = subindcheck(info, sub_id)
else:
aaa = subindcheck(info, sub_id)
chrinterest = chr_int(chrinterest)
slist = traitspecmatrices(info.gmat[aaa, :], info.meff) # trt-spec mat
grp = info.gmap.shape[1]-3
if (grp == 1 and len(pd.unique(info.group.iloc[:, 0].astype(str))) > 1):
print("The same map will be used for all groups")
numbers, probn = grtonum(info.group.iloc[aaa, 0].astype(str))
multgrpcov = []
for gnp in range(grp):
multgrpcov.append([])
if grp == 1:
us_ind = np.arange(start=0, stop=info.gmat[aaa, :].shape[0],
step=1)
else:
tng = numbers == gnp
us_ind = np.array(list(compress(np.arange(0, len(tng), 1),
tng))).T
print("Processing group ", probn[gnp])
rw_nms = info.group.iloc[aaa, 1].reset_index(drop=True).astype(
str)[us_ind]
cov_indxx = mrmcals(info, us_ind, stdsim, slist, covmat, probn[gnp],
chrinterest, save, progress)
multgrpcov[int(gnp)].append(
datret(info, rw_nms, probn[gnp], us_ind, slist, covmat,
cov_indxx, stdsim, progress))
if len(probn) == 1:
break
if grp > 1 and len(probn):
multgrpcov = dict(zip(probn, multgrpcov))
return multgrpcov
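# Illustrative sketch (never called): standardized similarity matrices for all
# individuals, without writing chromosome-wise matrices to disk.
def _example_simmat(info, covmat):
    return simmat_g(info, covmat, sub_id=None, chrinterest='none',
                    save=False, stdsim=True, progress=True)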
def submsvmsc(msvmsc, sub_idz):
"""Extract index in msvmsc data frame."""
sub_idz = pd.DataFrame(sub_idz)
numbs = msvmsc.iloc[:, 0].astype(str).tolist()
sub_idz = sub_idz.reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
if sub_idz is not None:
for i in mal:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
for i in fem:
if i not in numbs:
sys.exit("Individuals are not in msvmsc parameter")
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
return mal1, fem1
def pot_parents(info, data, selmale, selfm):
"""Subset individuals of interest."""
trait_names = info.meff.columns
if trait_names.size == 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=[trait_names[0]], ascending=False).iloc[0:no_dam, :]
elif trait_names.size > 1:
datamale = data[data.iloc[:, 1] == selmale[0]]
pos = subindcheck(info, pd.DataFrame(datamale.iloc[:, 0]))
datamale.insert(0, "pos", pos, True)
no_sire = int(datamale.shape[0] * selmale[1])
datamale = datamale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_sire, :]
datafemale = data[data.iloc[:, 1] == selfm[0]]
pos = subindcheck(info, pd.DataFrame(datafemale.iloc[:, 0]))
datafemale.insert(0, "pos", pos, True)
no_dam = int(datafemale.shape[0] * selfm[1])
datafemale = datafemale.sort_values(
by=['ABV'], ascending=False).iloc[0:no_dam, :]
matlist = np.array(np.meshgrid(
datamale.iloc[:, 0], datafemale.iloc[:, 0])).T.reshape(-1, 2)
ids = np.array(np.meshgrid(
datamale.iloc[:, 1], datafemale.iloc[:, 1])).T.reshape(-1, 2)
if trait_names.size == 1:
matndat = pd.DataFrame(index=range(matlist.shape[0]), columns=range(
4+trait_names.size))
else:
matndat = pd.DataFrame(
index=range(matlist.shape[0]), columns=range(5+trait_names.size))
matndat.iloc[:, [0, 1]] = ids
matndat.iloc[:, [2, 3]] = matlist
return matndat
def selsgebv(notr, matndat, gbv, maxmale):
"""Calculate breeding values for each trait (zygote)."""
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
elif notr > 1:
matndat.iloc[:, 4:(5+notr)] = (np.array(
gbv.iloc[mal, 2:(notr+3)]) + np.array(gbv.iloc[fem, 2:(notr+3)]))/2
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selspbtizyg(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate prob of breeding top inds (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
ttt = np.quantile(gbv.iloc[:, 0+2], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4], scale=np.sqrt(
msvtemp))
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+i], q=1-throrconst)
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+i], scale=np.sqrt(msvtemp))
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
ttt = np.quantile(gbv.iloc[:, 2+notr], q=1-throrconst)
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = 1 - scipy.stats.norm.cdf(
ttt, loc=matndat.iloc[:, 4+notr], scale=np.sqrt(msvtemp.ravel()))
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = mmat
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def selsindex(notr, gbv, matndat, msvmsc, throrconst, maxmale):
"""Calculate the index if constant is known (zygote)."""
mal1, fem1 = submsvmsc(msvmsc, pd.DataFrame(matndat.iloc[:, 0:2]))
mal = matndat.iloc[:, 2].tolist()
fem = matndat.iloc[:, 3].tolist()
if notr == 1:
matndat.iloc[:, 4] = (np.array(gbv.iloc[mal, (0+2)]) + np.array(
gbv.iloc[fem, (0+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, 0+2]) + np.array(
msvmsc.iloc[fem1, 0+2])
matndat.iloc[:, 4] = matndat.iloc[:, 4] + np.sqrt(msvtemp)*throrconst
elif notr > 1:
trait_names = gbv.columns[2:2+notr]
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
matndat.iloc[:, 4+i] = (
np.array(gbv.iloc[mal, (i+2)]) + np.array(
gbv.iloc[fem, (i+2)]))/2
msvtemp = np.array(msvmsc.iloc[mal1, t_ind[i]+2]) + np.array(
msvmsc.iloc[fem1, t_ind[i]+2])
matndat.iloc[:, 4+i] = matndat.iloc[:, 4+i] + np.sqrt(
msvtemp)*throrconst
matndat.iloc[:, 4+notr] = (
np.array(gbv.iloc[mal, (notr+2)]) + np.array(
gbv.iloc[fem, (notr+2)]))/2
msvtemp = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
matndat.iloc[:, 4+notr] = matndat.iloc[:, 4+notr] + (
np.sqrt(msvtemp)*throrconst).ravel()
idfxxx = np.unique(matndat.iloc[:, 3])
mmat = pd.DataFrame(index=range(len(idfxxx)),
columns=range(matndat.shape[1]))
for mmm in np.arange(0, len(idfxxx), 1):
axx = matndat.loc[matndat.iloc[:, 3] == idfxxx[mmm]]
tsire = np.array(axx.iloc[:, 2])
mmat.iloc[mmm, :] = axx.iloc[np.argmax(
axx.iloc[:, axx.columns.size-1]), :]
norepssire = Counter(mmat.iloc[:, 2])
lents = len(tsire)
for nrs in range(lents):
if norepssire[tsire[nrs]] <= maxmale-1:
mmat.iloc[mmm, :] = np.array(axx[axx.iloc[:, 2] == tsire[nrs]])
break
matndat = pd.DataFrame(mmat)
if notr == 1:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[gbv.columns.size-1]), axis=None)
else:
matndat.columns = np.concatenate((
['MaleID', 'FemaleID', 'MaleIndex', 'FemaleIndex'],
gbv.columns[2:gbv.columns.size].tolist()), axis=None)
return matndat
def subindcheckzyg(info, sub_idz):
"""Check sex and if matepairs provided in sub_idz are in group data."""
numbs = info.group.iloc[:, 1].astype(str).tolist()
sub_idz = pd.DataFrame(sub_idz).reset_index(drop=True).squeeze()
mal = sub_idz.iloc[:, 0].astype(str).tolist()
fem = sub_idz.iloc[:, 1].astype(str).tolist()
mal1 = [numbs.index(x) if x in numbs else None for x in mal]
fem1 = [numbs.index(x) if x in numbs else None for x in fem]
if len(pd.unique(info.group.iloc[mal1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of males")
if len(pd.unique(info.group.iloc[fem1, 0])) != 1:
sys.exit("Group class in sub_idz is not unique to ID of females")
idn = sub_idz.reset_index(drop=True)
mgp = list(set(info.group.iloc[mal1, 0]))
fgp = list(set(info.group.iloc[fem1, 0]))
if len(mgp) > 1 or len(fgp) > 1:
sys.exit("multiple sexes detected in data")
probn = [mgp[0], fgp[0]]
return mal1, fem1, idn, probn
def calcgbvzygsub(info, sub_idz):
"""Calc breeding values for matepairs."""
mal1, fem1, idn, _ = subindcheckzyg(info, sub_idz)
no_individuals, trait_names = idn.shape[0], info.meff.columns
notr = trait_names.size
if notr == 1:
gbv = np.zeros((no_individuals, notr))
mefff = np.array(info.meff.iloc[:, 0], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, 0] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv = pd.DataFrame(gbv)
gbv.columns = trait_names
elif notr > 1:
gbv = np.zeros((no_individuals, notr+1))
for i in range(notr):
mefff = np.array(info.meff.iloc[:, i], float)
matrix_me1 = makemebv(info.gmat[mal1, :], mefff)
matrix_me2 = makemebv(info.gmat[fem1, :], mefff)
gbv[:, i] = (matrix_me1.sum(axis=1) + matrix_me2.sum(axis=1))/2
gbv[:, notr] = gbv[:, notr] + info.indwt[i]*gbv[:, i]
gbv = pd.DataFrame(gbv)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
gbv.columns = colnames
gbv.insert(0, "FemaleIndex", fem1, True) # insert ID
gbv.insert(0, "MaleIndex", mal1, True) # insert ID
gbv.insert(0, "FemaleID", idn.iloc[:, 1], True) # insert ID
gbv.insert(0, "MaleID", idn.iloc[:, 0], True) # insert ID
return gbv
def calcprobzygsub(info, msvmsc, thresh, sub_idz):
"""Calculate the probability of breeding top individuals."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
gbvall = calcgbv(info, None)
if notr == 1:
probdf = np.zeros((gbv.shape[0], notr))
ttt = np.quantile(gbvall.iloc[:, (0+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
probdf[:, 0] = 1 - scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (0+4)], scale=np.sqrt(msvmsc111))
probdf = pd.DataFrame(probdf)
probdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
probdf = np.zeros((gbv.shape[0], notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
ttt = np.quantile(gbvall.iloc[:, (i+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.iloc[mal1, (t_ind[i])+2]) + np.array(
msvmsc.iloc[fem1, (t_ind[i])+2])
probdf[:, i] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (i+4)], scale=np.sqrt(msvmsc111))
probdf[:, i] = np.nan_to_num(probdf[:, i])
probdf[:, i] = 1 - probdf[:, i]
ttt = np.quantile(gbvall.iloc[:, (notr+2)], q=1-thresh)
msvmsc111 = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
probdf[:, notr] = scipy.stats.norm.cdf(
ttt, loc=gbv.iloc[:, (notr+4)], scale=np.sqrt(msvmsc111.ravel()))
probdf[:, notr] = np.nan_to_num(probdf[:, notr])
probdf[:, notr] = 1 - probdf[:, notr]
probdf = pd.DataFrame(probdf) # convert matrix to dataframe
colnames = np.concatenate((trait_names, "ABV"), axis=None)
probdf.columns = colnames
probdf = pd.concat([gbv.iloc[:, 0:4], probdf], axis=1)
return probdf
def calcindexzygsub(info, msvmsc, const, sub_idz):
"""Calc index matepairs if constant is known."""
subindcheckzyg(info, sub_idz)
mal1, fem1 = submsvmsc(msvmsc, sub_idz)
gbv = calcgbvzygsub(info, sub_idz)
trait_names = info.meff.columns # traits names
notr = trait_names.size
if notr == 1:
indexdf = np.zeros((gbv.shape[0], notr))
msvmsc111 = np.array(msvmsc.iloc[mal1, (0+2)]) + np.array(
msvmsc.iloc[fem1, (0+2)])
indexdf[:, 0] = gbv.iloc[:, (0+4)] + np.sqrt(msvmsc111)*const
indexdf = pd.DataFrame(indexdf)
indexdf.columns = trait_names
elif notr > 1:
colnam = np.concatenate((trait_names, "AG"), axis=None)
colnam = namesdf(notr+1, colnam).decode('utf-8')
indexdf = np.zeros((gbv.shape[0], notr+1))
t_ind = np.arange(colnam.shape[0])[np.in1d(colnam, trait_names)]
for i in range(notr):
msvmsc111 = np.array(msvmsc.iloc[mal1, (t_ind[i])+2]) + np.array(
msvmsc.iloc[fem1, (t_ind[i])+2])
indexdf[:, i] = gbv.iloc[:, (i+4)] + np.sqrt(msvmsc111)*const
msvmsc111 = np.array(msvmsc.loc[mal1, ["AG"]]) + np.array(
msvmsc.loc[fem1, ["AG"]])
indexdf[:, notr] = gbv.iloc[:, (notr+4)] + (
np.sqrt(msvmsc111)*const).ravel()
indexdf = pd.DataFrame(indexdf)
colnames = np.concatenate((trait_names, "ABV"), axis=None)
indexdf.columns = colnames
indexdf = pd.concat([gbv.iloc[:, 0:4], indexdf], axis=1) # grp
return indexdf
def selstrat_z(selstrat, info, sub_idz, msvmsc, throrconst, selmale, selfm,
maxmale):
"""
Calculate selection criteria (GEBV, PBTI, or index) for zygotes.
Parameters
----------
selstrat : str
A str containing any of GEBV, PBTI or index
info : class object
A class object created using the function "datacheck"
sub_idz : pandas.DataFrame
Index: RangeIndex (minimum of 2 rows)
Containing ID numbers of specific individuals to be evaluated.
The 1st and 2nd columns must be IDS of males and females, respectively.
msvmsc : pandas.DataFrame
DF created using the function "msvarcov_g"
throrconst : float
If selstrat is PBTI, a throrconst of value 0.05 sets threshold at
        top 5% of GEBV of the population. If selstrat is index, throrconst is
        a constant.
selmale : list
list of two items. 1st item is the str coding for males as in group
dataframe. The 2nd item is a float representing x% of males to be used
selfm : list
list of two items. 1st item is the str coding for females as in group
        dataframe. The 2nd item is a float representing x% of females to be used
maxmale : integer
maximum number of allocations for males
Returns
-------
matndat : pandas.DataFrame
Index: RangeIndex
Columns:
MaleID, FemaleID, MaleIndex, FemaleIndex, trait names and ABV
Note: If selstrat is GEBV, None may be used for throrconst and msvmsc.
If sub_idz is None and selstrat is GEBV, GEBVs will be estimated for all
individuals. However, if selstrat is not GEBV, the chosen selection
criterion will be estimated for all individuals in msvmsc data frame.
"""
    if len(pd.unique(info.group.iloc[:, 0])
from os.path import basename, splitext
import pandas as pd
from peakachulib.bam_to_bed import BamToBed
from peakachulib.count import ReadCounter
class Library(object):
'''
This class reads the alignment file for a library and counts and stores
the reads mapping to different annotations
'''
def __init__(self, paired_end, max_insert_size, bam_file, replicon_dict):
self.paired_end = paired_end
self.bam_file = bam_file
self.max_insert_size = max_insert_size
self.lib_name = splitext(basename(bam_file))[0]
self.replicon_dict = replicon_dict
def count_reads_for_windows(self):
read_counter = ReadCounter(self.paired_end, self.max_insert_size,
self.bam_file)
for replicon in self.replicon_dict:
self.replicon_dict[replicon]['window_counts'] = pd.DataFrame()
for strand in ['+', '-']:
window_counts = read_counter.count_reads_for_windows(
replicon,
strand,
self.replicon_dict[replicon]["window_list"])
self.replicon_dict[replicon]['window_counts'][
strand] = window_counts
read_counter.close_bam()
def count_reads_for_peaks(self):
read_counter = ReadCounter(self.paired_end, self.max_insert_size,
self.bam_file)
for replicon in self.replicon_dict:
peak_counts = read_counter.count_reads_for_peaks(
replicon,
self.replicon_dict[replicon]["peak_df"].to_dict('records'))
del self.replicon_dict[replicon]["peak_df"]
self.replicon_dict[replicon]["peak_counts"] = peak_counts
read_counter.close_bam()
def merge_reads(self):
bam_to_bed = BamToBed(self.paired_end, self.max_insert_size)
for replicon, reads in bam_to_bed.generate_bed_format(self.bam_file):
            self.replicon_dict[replicon]["reads"] = pd.Series(reads)
import pandas as pd
def merge_genes_from_uniprot_ensembl_db(ensembls: pd.DataFrame, proteins: pd.DataFrame,
uniprots: pd.DataFrame) -> pd.DataFrame:
uniprots_filtered = merge_genes_cellphone(proteins, uniprots)
merged_genes = pd.merge(uniprots_filtered, ensembls, left_on=0, right_on='Gene name')
result = _merge_ensembl_uniprots(merged_genes)
result = result[~result['gene_name'].str.contains('HLA')]
result = result[result['uniprot'].apply(lambda uniprot: uniprot in proteins['uniprot'].tolist())]
result = result.sort_values(by=list(result.columns.values))
return result
def _merge_ensembl_uniprots(merged_genes: pd.DataFrame) -> pd.DataFrame:
    merged_from_uniprot = pd.DataFrame()
from matplotlib import rcParams
from matplotlib.cm import rainbow
import warnings
warnings.filterwarnings('ignore')
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve
from sklearn.model_selection import RandomizedSearchCV
import numpy as np
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import classification_report
from yellowbrick.classifier import ClassificationReport
from sklearn.metrics import log_loss
from matplotlib import pyplot
from numpy import array
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest,SelectPercentile
from mlxtend.classifier import StackingClassifier
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
from sklearn.naive_bayes import GaussianNB
import wx
import wx.xrc
data = pd.read_csv("heart.csv")
data.head(5)
col = data.columns
data.isnull().sum()
X = data.drop(['target'], axis=1)
y=data['target']
X_train, X_test, y_train, y_test = tts(
X,
y,
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
def plot_roc_curve(fpr, tpr):
plt.plot(fpr, tpr, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.show()
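# Hypothetical usage sketch (not part of the original pipeline): plotting a ROC curve
# for any fitted classifier that implements predict_proba.
def example_roc(model, X_test, y_test):
    probs = model.predict_proba(X_test)[:, 1] # probability of the positive class
    fpr, tpr, _ = roc_curve(y_test, probs)
    print("AUC:", roc_auc_score(y_test, probs))
    plot_roc_curve(fpr, tpr)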
def RF(X_train,y_train,X_test,y_test):
ada = RandomForestClassifier(n_estimators = 24,random_state=5)
ada.fit(X_train,y_train)
print("Random Forest:train set")
y_pred = ada.predict(X_train)
pred=ada.predict_proba(X_test)
print("Random Forest:Confusion Matrix: ", confusion_matrix(y_train, y_pred))
print ("Random Forest:Accuracy : ", accuracy_score(y_train,y_pred)*100)
print("Random Forest:Test set")
y_pred = ada.predict(X_test)
print("Random Forest:Confusion Matrix: ", confusion_matrix(y_test, y_pred))
print ("Random Forest:Accuracy : ", accuracy_score(y_test,y_pred)*100)
#confusion Matrix
matrix =confusion_matrix(y_test, y_pred)
class_names=[0,1]
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
    sns.heatmap(pd.DataFrame(matrix))
# ============================================================================================================
# Purpose: Generates the Scenario Based Graphs using Gatling Simulation.
# Author: <NAME> (Nav)
# Notes: Script reads its inputs from the given config File.
# Parameters read from config file are:
# 1. Simulation Logs
# Revision: Last change: 05/09/18 by Nav :: Created and tested the script
# ==============================================================================================================
import pandas as pd
from pathlib import Path
import time
import getopt
import sys
import logging
from bokeh.layouts import Column
from bokeh.models import (HoverTool, Legend, LinearAxis, Range1d, ColumnDataSource)
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import d3
from bokeh.plotting import figure, output_file, show
##################################################################################################################
# Function Name: Generate_Gatling_Log_Df
# Description : Consumes the Gatling Logs and Return a clean Dataframe which can be used by other functions
# @param : List of Simulation Logs
# @return : Dataframe gat_log_graph_df with columns: [Owner,Scenario,Transaction_Name,Status,ResponseTime,
# LocalTime]
# Author : <NAME>
# Comments : Created on 05/09/2018
##################################################################################################################
def generate_gatling_log_df(simulation_logs_list):
# Column Names
gat_log_col_names = ["Owner", "Scenario", "ThreadId", "JunkCol1",
"Transaction_Name", "StartTime", "EndTime", "Status"]
# Reading into Dataframe
gat_log_df = pd.read_csv(simulation_logs_list[0], sep='\t', header=None, names=gat_log_col_names, dtype=str)
for index in range(len(simulation_logs_list)-1):
gat_log_df_1 = pd.read_csv(simulation_logs_list[index+1], sep='\t', header=None, names=gat_log_col_names, dtype=str)
gat_log_df = gat_log_df.append(gat_log_df_1)
gat_log_df = gat_log_df.reset_index(drop=True)
gat_log_df.to_csv("temp.csv")
# Fill NaN values with default value
gat_log_df = gat_log_df.fillna("0")
# Get Dataframe for Graphs
gat_log_graph_df = gat_log_df.loc[gat_log_df["Status"] != "KO"]
gat_log_graph_df = gat_log_graph_df[gat_log_graph_df["Owner"] != "GROUP"]
gat_log_graph_df = gat_log_graph_df[gat_log_graph_df["Owner"] != "RUN"]
# Set correct dtypes
gat_log_graph_df[['StartTime', 'EndTime']] = gat_log_graph_df[['StartTime', 'EndTime']].apply(pd.to_numeric)
# Calculate Response Time
gat_log_graph_df["ResponseTime"] = gat_log_graph_df.EndTime - gat_log_graph_df.StartTime
gat_log_graph_df['LocalTime'] = gat_log_graph_df['StartTime'] + (10 * 60 * 60 * 1000)
# Drop Unnecessary Columns
gat_log_graph_df = gat_log_graph_df.drop(["JunkCol1"], axis=1)
gat_log_graph_df = gat_log_graph_df.drop(["ThreadId"], axis=1)
gat_log_graph_df = gat_log_graph_df.drop(["StartTime"], axis=1)
gat_log_graph_df = gat_log_graph_df.drop(["EndTime"], axis=1)
return gat_log_graph_df
########################################################################################################################
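# Hypothetical usage sketch (never called; the log file names below are placeholders
# and would normally come from the config file mentioned in the header).
def _example_usage():
    df = generate_gatling_log_df(["simulation-1.log", "simulation-2.log"])
    return df.Scenario.unique().tolist()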
##################################################################################################################
# Function Name: Get_Scenario_Metrics
# Description : Calculates the Active Users, NinetyFifth of the given scenario
# @param : Scenario Name
# @param : Gatling Log Dataframe
# @return : Dataframe scenario_metrics_df with columns: [LocalTime, Users, ${TransactionNames}]
# @return : Dataframe overall_transaction_ninety_fifth_df with columns: [Transaction, NinetyFifth]
# Author : <NAME>
# Comments : Created on 05/09/2018
##################################################################################################################
def get_scenario_metrics(scenario_name, gatling_log_df):
# Set the index
row_count = len(gatling_log_df.index)
# Create new Scenario Dataframe
cond_col = gatling_log_df['Scenario'] == scenario_name
scenario_temp_df = gatling_log_df[cond_col]
# Transactions OK DF
scenario_ok_df = scenario_temp_df.loc[scenario_temp_df["Status"] == "OK"]
# Active Threads DF
scenario_users_df = scenario_temp_df.loc[scenario_temp_df["Owner"] == "USER"]
# New Dataframe
scenario_metrics_df = pd.DataFrame(columns=["LocalTime"])
# Add Active Users
active_users = 0
scenario_users_temp_df = pd.DataFrame(columns=["LocalTime", "Users"])
# Start Begin and End Time
begin_time = scenario_users_df["LocalTime"][scenario_users_df.index[0]]
end_time = scenario_users_df["LocalTime"][scenario_users_df.index[-1]]
# Entry when users would have been zero 1 sec prior to test
scenario_users_temp_df.loc[0] = (begin_time - 1000, 0)
# Just get Divisor to know how many loops to go through
loop_count = (end_time - begin_time) // 1000
for i in range(loop_count):
end_time = begin_time + 1000
tmp_df = scenario_users_df[
(scenario_users_df["LocalTime"] >= begin_time) & (scenario_users_df["LocalTime"] <= end_time)]
active_users = active_users + tmp_df["Scenario"].count()
scenario_users_temp_df.loc[i + row_count] = [begin_time, active_users]
begin_time = end_time
# To calculate for the last TimeInterval
end_time = scenario_users_df["LocalTime"][scenario_users_df.index[-1]]
tmp_df = scenario_users_df[(scenario_users_df["LocalTime"] >= begin_time) &
(scenario_users_df["LocalTime"] <= end_time)]
active_users = active_users + tmp_df["Scenario"].count()
scenario_users_temp_df.loc[-1] = [begin_time, active_users]
# Refresh the index
scenario_users_temp_df = scenario_users_temp_df.reset_index(drop=True)
scenario_users_temp_df = scenario_users_temp_df.applymap(str)
# Merge the dataframe of Active Users
scenario_metrics_df = scenario_metrics_df.merge(scenario_users_temp_df, on='LocalTime', how='outer')
# Overall NinetyFifth
overall_transaction_ninety_fifth_df = pd.DataFrame(columns=["Transaction", "NinetyFifth"])
# Get the transaction list in the scenario
transactions_list = scenario_ok_df.Transaction_Name.unique().tolist()
for transaction_index in range(len(transactions_list)):
transaction_name = transactions_list[transaction_index]
# Make DF for transaction
temp_df = pd.DataFrame(columns=["LocalTime", "TransactionName"])
# Create new Transaction Dataframe
transaction_df = scenario_ok_df[scenario_ok_df['Transaction_Name'] == transaction_name]
# Calculate the overall 95th of the Transaction
overall_transaction_ninety_fifth_df.loc[transaction_index] = [transaction_name, transaction_df.ResponseTime.quantile(0.95)]
# Determine the begin and end times for this transaction
begin_time = transaction_df["LocalTime"][transaction_df.index[0]]
end_time = transaction_df["LocalTime"][transaction_df.index[-1]]
# Number of one-second intervals between the begin and end times
loop_count = (end_time - begin_time) // 1000
for i in range(loop_count):
end_time = begin_time + 1000
tmp_df = transaction_df[
(transaction_df["LocalTime"] >= begin_time) & (transaction_df["LocalTime"] <= end_time)]
temp_df.loc[i + row_count] = [begin_time, tmp_df.ResponseTime.quantile(0.95)]
begin_time = end_time
# To calculate for the last TimeInterval
end_time = transaction_df["LocalTime"][transaction_df.index[-1]]
tmp_df = transaction_df[(transaction_df["LocalTime"] >= begin_time) & (transaction_df["LocalTime"] <= end_time)]
temp_df.loc[-1] = [begin_time, tmp_df.ResponseTime.quantile(0.95)]
# Clean the Dataframe from NaN values
temp_df = temp_df.dropna(how='any')
# Refresh the index
temp_df = temp_df.reset_index(drop=True)
# Rename the columns and set datatype to str of all values
temp_df.rename(columns={'TransactionName': transaction_name}, inplace=True)
temp_df = temp_df.applymap(str)
# Join two Dataframes
scenario_metrics_df = scenario_metrics_df.merge(temp_df, on='LocalTime', how='outer')
# Convert LocalTime to datetime and sort the times in ascending order
scenario_metrics_df['LocalTime'] = | pd.to_datetime(scenario_metrics_df['LocalTime'], unit='ms') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
build_df.py
Purpose:
Build the dataframe with all the necessary information
Version:
1 First start
Date:
2021/06/07
Author:
<NAME>
"""
###########################################################
### Imports
import os
import pandas as pd
from readwrite_outline import readjson
# import matplotlib.pyplot as plt
###########################################################
### main
def main():
# Magic numbers
path = os.getcwd()
handles = | pd.read_pickle(path + '/Input/handles_df.pkl') | pandas.read_pickle |
# Created on 2020/7/15
# This module is for the class TimeSeries and related functions.
# Standard library imports
from datetime import datetime
from typing import Any, Callable, Optional, Union
import warnings
# Third party imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
from statsmodels.api import OLS
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from typeguard import typechecked
# Local application imports
from .. import exceptions
# Dictionary of Pandas' Offset Aliases
# and their numbers of appearance in a year.
DPOA = {'D': 365, 'B': 252, 'W': 52,
'SM': 24, 'SMS': 24,
'BM': 12, 'BMS': 12, 'M': 12, 'MS': 12,
'BQ': 4, 'BQS': 4, 'Q': 4, 'QS': 4,
'Y': 1, 'A': 1}
# Datetimes format
fmt = "%Y-%m-%d %H:%M:%S"
fmtz = "%Y-%m-%d %H:%M:%S %Z%z"
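# Illustrative examples (added for clarity, not part of the original module): looking up the number of
# periods per year for a pandas offset alias, and formatting a timezone-aware datetime with the two
# format strings defined above.
#
#   >>> DPOA['B']                                                # business days per year
#   252
#   >>> pytz.utc.localize(datetime(2020, 1, 1, 12, 0, 0)).strftime(fmt)
#   '2020-01-01 12:00:00'
#   >>> pytz.utc.localize(datetime(2020, 1, 1, 12, 0, 0)).strftime(fmtz)
#   '2020-01-01 12:00:00 UTC+0000'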
#---------#---------#---------#---------#---------#---------#---------#---------#---------#
@typechecked
def get_list_timezones() -> None:
"""
Lists all the time zone names that can be used.
"""
print(pytz.all_timezones)
return None
# CLASS Series
@typechecked
class Series:
"""
Abstract class defining a Series and its methods.
This class serves as a parent class for TimeSeries and CatTimeSeries.
Attributes
----------
data : pandas.Series or pandas.DataFrame
Contains a time-like index and for each time a single value.
start_utc : Pandas.Timestamp
Starting date.
end_utc : Pandas.Timestamp
Ending date.
nvalues : int
Number of values, i.e. also of dates.
freq : str or None
Frequency inferred from index.
name : str
Name or nickname of the series.
unit : str or None
Unit of the series values.
tz : str
Timezone name.
timezone : pytz timezone
Timezone associated with dates.
"""
def __init__(self,
data: Union[pd.Series, pd.DataFrame, None]=None,
tz: Optional[str]=None,
unit: Optional[str]=None,
name: Optional[str]=None
) -> None:
"""
Receives a pandas.Series or pandas.DataFrame as an argument and initializes the time series.
"""
# Deal with DataFrame / Series
if (data is None) or (data.empty is True):
self.data = | pd.Series(index=None, data=None) | pandas.Series |
import re
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from woodwork import DataColumn, DataTable
from woodwork.datatable import _check_unique_column_names
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
FullName,
Integer,
IPAddress,
LatLong,
LogicalType,
NaturalLanguage,
Ordinal,
PhoneNumber,
SubRegionCode,
Timedelta,
ZIPCode
)
from woodwork.tests.testing_utils import (
check_column_order,
mi_between_cols,
to_pandas,
validate_subset_dt
)
from woodwork.utils import import_or_none
dd = import_or_none('dask.dataframe')
dask_delayed = import_or_none('dask.delayed')
ks = import_or_none('databricks.koalas')
def test_datatable_df_property(sample_df):
dt = DataTable(sample_df)
assert dt.df is sample_df
pd.testing.assert_frame_equal(to_pandas(dt.df), to_pandas(sample_df))
def test_datatable_with_numeric_datetime_time_index(time_index_df):
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': Datetime})
error_msg = 'Time index column must contain datetime or numeric values'
with pytest.raises(TypeError, match=error_msg):
DataTable(time_index_df, name='datatable', time_index='strs', logical_types={'strs': Datetime})
assert dt.time_index == 'ints'
assert dt.to_dataframe()['ints'].dtype == 'datetime64[ns]'
def test_datatable_with_numeric_time_index(time_index_df):
# Set a numeric time index on init
dt = DataTable(time_index_df, time_index='ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Integer
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Specify logical type for time index on init
dt = DataTable(time_index_df, time_index='ints', logical_types={'ints': 'Double'})
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
# Change time index to normal datetime time index
dt = dt.set_time_index('times')
date_col = dt['ints']
assert dt.time_index == 'times'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'numeric'}
# Set numeric time index after init
dt = DataTable(time_index_df, logical_types={'ints': 'Double'})
dt = dt.set_time_index('ints')
date_col = dt['ints']
assert dt.time_index == 'ints'
assert date_col.logical_type == Double
assert date_col.semantic_tags == {'time_index', 'numeric'}
def test_datatable_adds_standard_semantic_tags(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={
'id': Categorical,
'age': Integer,
})
assert dt.semantic_tags['id'] == {'category'}
assert dt.semantic_tags['age'] == {'numeric'}
def test_check_unique_column_names(sample_df):
if ks and isinstance(sample_df, ks.DataFrame):
pytest.skip("Koalas enforces unique column names")
duplicate_cols_df = sample_df.copy()
if dd and isinstance(sample_df, dd.DataFrame):
duplicate_cols_df = dd.concat([duplicate_cols_df, duplicate_cols_df['age']], axis=1)
else:
duplicate_cols_df.insert(0, 'age', [18, 21, 65, 43], allow_duplicates=True)
with pytest.raises(IndexError, match='Dataframe cannot contain duplicate columns names'):
_check_unique_column_names(duplicate_cols_df)
def test_datatable_types(sample_df):
new_dates = ["2019~01~01", "2019~01~02", "2019~01~03", "2019~01~04"]
if dd and isinstance(sample_df, dd.DataFrame):
sample_df['formatted_date'] = pd.Series(new_dates)
else:
sample_df['formatted_date'] = new_dates
ymd_format = Datetime(datetime_format='%Y~%m~%d')
dt = DataTable(sample_df, logical_types={'formatted_date': ymd_format})
returned_types = dt.types
assert isinstance(returned_types, pd.DataFrame)
assert 'Physical Type' in returned_types.columns
assert 'Logical Type' in returned_types.columns
assert 'Semantic Tag(s)' in returned_types.columns
assert returned_types.shape[1] == 3
assert len(returned_types.index) == len(sample_df.columns)
assert all([dc.logical_type in ww.type_system.registered_types or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
'formatted_date': ymd_format
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(returned_types['Logical Type'])
for tag in returned_types['Semantic Tag(s)']:
assert isinstance(tag, str)
def test_datatable_typing_info_with_col_names(sample_df):
dt = DataTable(sample_df)
typing_info_df = dt._get_typing_info(include_names_col=True)
assert isinstance(typing_info_df, pd.DataFrame)
assert 'Data Column' in typing_info_df.columns
assert 'Physical Type' in typing_info_df.columns
assert 'Logical Type' in typing_info_df.columns
assert 'Semantic Tag(s)' in typing_info_df.columns
assert typing_info_df.shape[1] == 4
assert typing_info_df.iloc[:, 0].name == 'Data Column'
assert len(typing_info_df.index) == len(sample_df.columns)
assert all([dc.logical_type in LogicalType.__subclasses__() or isinstance(dc.logical_type, LogicalType) for dc in dt.columns.values()])
correct_logical_types = {
'id': Integer,
'full_name': NaturalLanguage,
'email': NaturalLanguage,
'phone_number': NaturalLanguage,
'age': Integer,
'signup_date': Datetime,
'is_registered': Boolean,
}
correct_logical_types = pd.Series(list(correct_logical_types.values()),
index=list(correct_logical_types.keys()))
assert correct_logical_types.equals(typing_info_df['Logical Type'])
for tag in typing_info_df['Semantic Tag(s)']:
assert isinstance(tag, str)
correct_column_names = pd.Series(list(sample_df.columns),
index=list(sample_df.columns))
assert typing_info_df['Data Column'].equals(correct_column_names)
def test_datatable_head(sample_df):
dt = DataTable(sample_df, index='id', logical_types={'email': 'EmailAddress'}, semantic_tags={'signup_date': 'birthdat'})
head = dt.head()
assert isinstance(head, pd.DataFrame)
assert isinstance(head.columns, pd.MultiIndex)
if dd and isinstance(sample_df, dd.DataFrame):
assert len(head) == 2
else:
assert len(head) == 4
for i in range(len(head.columns)):
name, dtype, logical_type, tags = head.columns[i]
dc = dt[name]
# confirm the order is the same
assert dt._dataframe.columns[i] == name
# confirm the rest of the attributes match up
assert dc.dtype == dtype
assert dc.logical_type == logical_type
assert str(list(dc.semantic_tags)) == tags
shorter_head = dt.head(1)
assert len(shorter_head) == 1
assert head.columns.equals(shorter_head.columns)
def test_datatable_repr(small_df):
dt = DataTable(small_df)
dt_repr = repr(dt)
expected_repr = ' Physical Type Logical Type Semantic Tag(s)\nData Column \nsample_datetime_series datetime64[ns] Datetime []'
assert dt_repr == expected_repr
dt_html_repr = dt._repr_html_()
expected_repr = '<table border="1" class="dataframe">\n <thead>\n <tr style="text-align: right;">\n <th></th>\n <th>Physical Type</th>\n <th>Logical Type</th>\n <th>Semantic Tag(s)</th>\n </tr>\n <tr>\n <th>Data Column</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>sample_datetime_series</th>\n <td>datetime64[ns]</td>\n <td>Datetime</td>\n <td>[]</td>\n </tr>\n </tbody>\n</table>'
assert dt_html_repr == expected_repr
def test_datatable_repr_empty(empty_df):
dt = DataTable(empty_df)
assert repr(dt) == 'Empty DataTable'
assert dt._repr_html_() == 'Empty DataTable'
assert dt.head() == 'Empty DataTable'
def test_set_types_combined(sample_df):
dt = DataTable(sample_df, index='id', time_index='signup_date')
assert dt['signup_date'].semantic_tags == set(['time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Integer
assert dt['is_registered'].semantic_tags == set()
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == NaturalLanguage
assert dt['phone_number'].logical_type == NaturalLanguage
semantic_tags = {
'signup_date': ['test1'],
'age': [],
'is_registered': 'test2'
}
logical_types = {
'email': 'EmailAddress',
'phone_number': PhoneNumber,
'age': 'Double'
}
dt = dt.set_types(logical_types=logical_types, semantic_tags=semantic_tags)
assert dt['signup_date'].semantic_tags == set(['test1', 'time_index'])
assert dt['signup_date'].logical_type == Datetime
assert dt['age'].semantic_tags == set(['numeric'])
assert dt['age'].logical_type == Double
assert dt['is_registered'].semantic_tags == set(['test2'])
assert dt['is_registered'].logical_type == Boolean
assert dt['email'].logical_type == EmailAddress
assert dt['phone_number'].logical_type == PhoneNumber
def test_new_dt_from_columns(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
empty_dt = dt._new_dt_from_cols([])
assert len(empty_dt.columns) == 0
just_index = dt._new_dt_from_cols(['id'])
assert just_index.index == dt.index
assert just_index.time_index is None
validate_subset_dt(just_index, dt)
just_time_index = dt._new_dt_from_cols(['signup_date'])
assert just_time_index.time_index == dt.time_index
assert just_time_index.index is None
validate_subset_dt(just_time_index, dt)
transfer_schema = dt._new_dt_from_cols(['phone_number'])
assert transfer_schema.index is None
assert transfer_schema.time_index is None
validate_subset_dt(transfer_schema, dt)
def test_pop(sample_df):
dt = DataTable(sample_df,
name='datatable',
logical_types={'age': Integer},
semantic_tags={'age': 'custom_tag'},
use_standard_tags=True)
datacol = dt.pop('age')
assert isinstance(datacol, DataColumn)
assert 'custom_tag' in datacol.semantic_tags
assert all(to_pandas(datacol.to_series()).values == [33, 25, 33, 57])
assert datacol.logical_type == Integer
assert 'age' not in dt.to_dataframe().columns
assert 'age' not in dt.columns
assert 'age' not in dt.logical_types.keys()
assert 'age' not in dt.semantic_tags.keys()
def test_shape(categorical_df):
dt = ww.DataTable(categorical_df)
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 5)
assert dt_shape == df_shape
dt.pop('ints')
dt_shape = dt.shape
df_shape = dt.to_dataframe().shape
if dd and isinstance(categorical_df, dd.DataFrame):
assert isinstance(dt.shape[0], dask_delayed.Delayed)
dt_shape = (dt_shape[0].compute(), dt_shape[1])
df_shape = (df_shape[0].compute(), df_shape[1])
assert dt_shape == (10, 4)
assert dt_shape == df_shape
def test_select_invalid_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'age': Double,
'signup_date': Datetime,
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
})
err_msg = "Invalid selector used in include: 1 must be either a string or LogicalType"
with pytest.raises(TypeError, match=err_msg):
dt.select(['boolean', 'index', Double, 1])
dt_empty = dt.select([])
assert len(dt_empty.columns) == 0
def test_select_single_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d')
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth'
})
dt_ltype_string = dt.select('full_name')
assert len(dt_ltype_string.columns) == 1
assert 'full_name' in dt_ltype_string.columns
dt_ltype_obj = dt.select(Integer)
assert len(dt_ltype_obj.columns) == 2
assert 'age' in dt_ltype_obj.columns
assert 'id' in dt_ltype_obj.columns
dt_tag_string = dt.select('index')
assert len(dt_tag_string.columns) == 1
assert 'id' in dt_tag_string.columns
dt_tag_instantiated = dt.select('Datetime')
assert len(dt_tag_instantiated.columns) == 1
assert 'signup_date' in dt_tag_instantiated.columns
def test_select_list_inputs(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
dt = dt.set_types(logical_types={
'full_name': FullName,
'email': EmailAddress,
'phone_number': PhoneNumber,
'signup_date': Datetime(datetime_format='%Y-%m-%d'),
})
dt = dt.set_types(semantic_tags={
'full_name': ['new_tag', 'tag2'],
'age': 'numeric',
'signup_date': 'date_of_birth',
'email': 'tag2',
'is_registered': 'category'
})
dt_just_strings = dt.select(['FullName', 'index', 'tag2', 'boolean'])
assert len(dt_just_strings.columns) == 4
assert 'id' in dt_just_strings.columns
assert 'full_name' in dt_just_strings.columns
assert 'email' in dt_just_strings.columns
assert 'is_registered' in dt_just_strings.columns
dt_mixed_selectors = dt.select([FullName, 'index', 'time_index', Integer])
assert len(dt_mixed_selectors.columns) == 4
assert 'id' in dt_mixed_selectors.columns
assert 'full_name' in dt_mixed_selectors.columns
assert 'signup_date' in dt_mixed_selectors.columns
assert 'age' in dt_mixed_selectors.columns
dt_common_tags = dt.select(['category', 'numeric', Boolean, Datetime])
assert len(dt_common_tags.columns) == 3
assert 'is_registered' in dt_common_tags.columns
assert 'age' in dt_common_tags.columns
assert 'signup_date' in dt_common_tags.columns
def test_select_instantiated():
ymd_format = Datetime(datetime_format='%Y~%m~%d')
df = pd.DataFrame({
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd': ["2019~01~01", "2019~01~02", "2019~01~03"],
})
dt = DataTable(df,
logical_types={'ymd': ymd_format,
'dates': Datetime})
dt = dt.select('Datetime')
assert len(dt.columns) == 2
err_msg = "Invalid selector used in include: Datetime cannot be instantiated"
with pytest.raises(TypeError, match=err_msg):
dt.select(ymd_format)
def test_select_maintain_order(sample_df):
dt = DataTable(sample_df, logical_types={col_name: 'NaturalLanguage' for col_name in sample_df.columns})
new_dt = dt.select('NaturalLanguage')
check_column_order(dt, new_dt)
def test_filter_cols(sample_df):
dt = DataTable(sample_df, time_index='signup_date', index='id', name='dt_name')
filtered = dt._filter_cols(include='email', col_names=True)
assert filtered == ['email']
filtered_log_type_string = dt._filter_cols(include='NaturalLanguage')
filtered_log_type = dt._filter_cols(include=NaturalLanguage)
assert filtered_log_type == filtered_log_type_string
filtered_semantic_tag = dt._filter_cols(include='numeric')
assert filtered_semantic_tag == ['age']
filtered_multiple = dt._filter_cols(include=['numeric'])
expected = ['phone_number', 'age']
for col in filtered_multiple:
assert col in expected
filtered_multiple_overlap = dt._filter_cols(include=['NaturalLanguage', 'email'], col_names=True)
expected = ['full_name', 'phone_number', 'email']
for col in filtered_multiple_overlap:
assert col in expected
def test_datetime_inference_with_format_param():
df = pd.DataFrame({
'index': [0, 1, 2],
'dates': ["2019/01/01", "2019/01/02", "2019/01/03"],
'ymd_special': ["2019~01~01", "2019~01~02", "2019~01~03"],
'mdy_special': pd.Series(['3~11~2000', '3~12~2000', '3~13~2000'], dtype='string'),
})
dt = DataTable(df,
name='dt_name',
logical_types={'ymd_special': Datetime(datetime_format='%Y~%m~%d'),
'mdy_special': Datetime(datetime_format='%m~%d~%Y'),
'dates': Datetime},
time_index='ymd_special')
assert dt.time_index == 'ymd_special'
assert dt['dates'].logical_type == Datetime
assert isinstance(dt['ymd_special'].logical_type, Datetime)
assert isinstance(dt['mdy_special'].logical_type, Datetime)
dt = dt.set_time_index('mdy_special')
assert dt.time_index == 'mdy_special'
df = pd.DataFrame({
'mdy_special': pd.Series(['3&11&2000', '3&12&2000', '3&13&2000'], dtype='string'),
})
dt = DataTable(df)
dt = dt.set_types(logical_types={'mdy_special': Datetime(datetime_format='%m&%d&%Y')})
dt.time_index = 'mdy_special'
assert isinstance(dt['mdy_special'].logical_type, Datetime)
assert dt.time_index == 'mdy_special'
def test_natural_language_inference_with_config_options():
dataframe = pd.DataFrame({
'index': [0, 1, 2],
'values': ["0123456", "01234567", "012345"]
})
ww.config.set_option('natural_language_threshold', 5)
dt = DataTable(dataframe, name='dt_name')
assert dt.columns['values'].logical_type == NaturalLanguage
ww.config.reset_option('natural_language_threshold')
def test_describe_dict(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_dict = dt.describe_dict()
index_order = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
stats_dict_to_df = pd.DataFrame(stats_dict).reindex(index_order)
stats_df = dt.describe()
pd.testing.assert_frame_equal(stats_df, stats_dict_to_df)
def test_describe_does_not_include_index(describe_df):
dt = DataTable(describe_df, index='index_col')
stats_df = dt.describe()
assert 'index_col' not in stats_df.columns
def test_datatable_describe_method(describe_df):
categorical_ltypes = [Categorical,
CountryCode,
Ordinal(order=('yellow', 'red', 'blue')),
SubRegionCode,
ZIPCode]
boolean_ltypes = [Boolean]
datetime_ltypes = [Datetime]
formatted_datetime_ltypes = [Datetime(datetime_format='%Y~%m~%d')]
timedelta_ltypes = [Timedelta]
numeric_ltypes = [Double, Integer]
natural_language_ltypes = [EmailAddress, Filepath, FullName, IPAddress,
PhoneNumber, URL]
latlong_ltypes = [LatLong]
expected_index = ['physical_type',
'logical_type',
'semantic_tags',
'count',
'nunique',
'nan_count',
'mean',
'mode',
'std',
'min',
'first_quartile',
'second_quartile',
'third_quartile',
'max',
'num_true',
'num_false']
# Test categorical columns
category_data = describe_df[['category_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'object'
else:
expected_dtype = 'category'
for ltype in categorical_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'category', 'custom_tag'},
'count': 7,
'nunique': 3,
'nan_count': 1,
'mode': 'red'}, name='category_col')
dt = DataTable(category_data, logical_types={'category_col': ltype}, semantic_tags={'category_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'category_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['category_col'].dropna())
# Test boolean columns
boolean_data = describe_df[['boolean_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'bool'
else:
expected_dtype = 'boolean'
for ltype in boolean_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 8,
'nan_count': 0,
'mode': True,
'num_true': 5,
'num_false': 3}, name='boolean_col')
dt = DataTable(boolean_data, logical_types={'boolean_col': ltype}, semantic_tags={'boolean_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'boolean_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['boolean_col'].dropna())
# Test datetime columns
datetime_data = describe_df[['datetime_col']]
for ltype in datetime_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': pd.Timestamp('2020-01-19 09:25:42.857142784'),
'mode': pd.Timestamp('2020-02-01 00:00:00'),
'min': pd.Timestamp('2020-01-01 00:00:00'),
'max': pd.Timestamp('2020-02-02 18:00:00')}, name='datetime_col')
dt = DataTable(datetime_data, logical_types={'datetime_col': ltype}, semantic_tags={'datetime_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'datetime_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['datetime_col'].dropna())
# Test formatted datetime columns
formatted_datetime_data = describe_df[['formatted_datetime_col']]
for ltype in formatted_datetime_ltypes:
converted_to_datetime = pd.to_datetime(['2020-01-01',
'2020-02-01',
'2020-03-01',
'2020-02-02',
'2020-03-02',
pd.NaT,
'2020-02-01',
'2020-01-02'])
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': converted_to_datetime.mean(),
'mode': pd.to_datetime('2020-02-01'),
'min': converted_to_datetime.min(),
'max': converted_to_datetime.max()}, name='formatted_datetime_col')
dt = DataTable(formatted_datetime_data,
logical_types={'formatted_datetime_col': ltype},
semantic_tags={'formatted_datetime_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'formatted_datetime_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['formatted_datetime_col'].dropna())
# Test timedelta columns - Skip for Koalas
if not (ks and isinstance(describe_df, ks.DataFrame)):
timedelta_data = describe_df['timedelta_col']
for ltype in timedelta_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nan_count': 1,
'mode': pd.Timedelta('31days')}, name='col')
df = pd.DataFrame({'col': timedelta_data})
dt = DataTable(df, logical_types={'col': ltype}, semantic_tags={'col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['col'].dropna())
# Test numeric columns
numeric_data = describe_df[['numeric_col']]
for ltype in numeric_ltypes:
expected_vals = pd.Series({
'physical_type': ltype.pandas_dtype,
'logical_type': ltype,
'semantic_tags': {'numeric', 'custom_tag'},
'count': 7,
'nunique': 6,
'nan_count': 1,
'mean': 20.857142857142858,
'mode': 10,
'std': 18.27957486220227,
'min': 1,
'first_quartile': 10,
'second_quartile': 17,
'third_quartile': 26,
'max': 56}, name='numeric_col')
dt = DataTable(numeric_data, logical_types={'numeric_col': ltype}, semantic_tags={'numeric_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'numeric_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['numeric_col'].dropna(), check_exact=False)
# Test natural language columns
natural_language_data = describe_df[['natural_language_col']]
if ks and isinstance(category_data, ks.DataFrame):
expected_dtype = 'object'
else:
expected_dtype = 'string'
for ltype in natural_language_ltypes:
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 7,
'nan_count': 1,
'mode': 'Duplicate sentence.'}, name='natural_language_col')
dt = DataTable(natural_language_data,
logical_types={'natural_language_col': ltype},
semantic_tags={'natural_language_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'natural_language_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['natural_language_col'].dropna())
# Test latlong columns
latlong_data = describe_df[['latlong_col']]
expected_dtype = 'object'
for ltype in latlong_ltypes:
mode = [0, 0] if ks and isinstance(describe_df, ks.DataFrame) else (0, 0)
expected_vals = pd.Series({
'physical_type': expected_dtype,
'logical_type': ltype,
'semantic_tags': {'custom_tag'},
'count': 6,
'nan_count': 2,
'mode': mode}, name='latlong_col')
dt = DataTable(latlong_data,
logical_types={'latlong_col': ltype},
semantic_tags={'latlong_col': 'custom_tag'})
stats_df = dt.describe()
assert isinstance(stats_df, pd.DataFrame)
assert set(stats_df.columns) == {'latlong_col'}
assert stats_df.index.tolist() == expected_index
pd.testing.assert_series_equal(expected_vals, stats_df['latlong_col'].dropna())
def test_datatable_describe_with_improper_tags(describe_df):
df = describe_df.copy()[['boolean_col', 'natural_language_col']]
logical_types = {
'boolean_col': Boolean,
'natural_language_col': NaturalLanguage,
}
semantic_tags = {
'boolean_col': 'category',
'natural_language_col': 'numeric',
}
dt = DataTable(df, logical_types=logical_types, semantic_tags=semantic_tags)
stats_df = dt.describe()
# Make sure boolean stats were computed with improper 'category' tag
assert stats_df['boolean_col']['logical_type'] == Boolean
assert stats_df['boolean_col']['semantic_tags'] == {'category'}
# Make sure numeric stats were not computed with improper 'numeric' tag
assert stats_df['natural_language_col']['semantic_tags'] == {'numeric'}
assert stats_df['natural_language_col'][['mean', 'std', 'min', 'max']].isnull().all()
def test_datatable_describe_with_no_semantic_tags(describe_df):
df = describe_df.copy()[['category_col', 'numeric_col']]
logical_types = {
'category_col': Categorical,
'numeric_col': Integer,
}
dt = DataTable(df, logical_types=logical_types, use_standard_tags=False)
stats_df = dt.describe()
assert dt['category_col'].semantic_tags == set()
assert dt['numeric_col'].semantic_tags == set()
# Make sure category stats were computed
assert stats_df['category_col']['semantic_tags'] == set()
assert stats_df['category_col']['nunique'] == 3
# Make sure numeric stats were computed
assert stats_df['numeric_col']['semantic_tags'] == set()
np.testing.assert_almost_equal(stats_df['numeric_col']['mean'], 20.85714, 5)
def test_datatable_describe_with_include(sample_df):
semantic_tags = {
'full_name': 'tag1',
'email': ['tag2'],
'age': ['numeric', 'age']
}
dt = DataTable(sample_df, semantic_tags=semantic_tags)
col_name_df = dt.describe(include=['full_name'])
assert col_name_df.shape == (16, 1)
assert 'full_name' in col_name_df.columns
semantic_tags_df = dt.describe(['tag1', 'tag2'])
assert 'full_name' in col_name_df.columns
assert len(semantic_tags_df.columns) == 2
logical_types_df = dt.describe([Datetime, Boolean])
assert 'signup_date' in logical_types_df.columns and 'is_registered' in logical_types_df.columns
assert len(logical_types_df.columns) == 2
multi_params_df = dt.describe(['age', 'tag1', Datetime])
expected = ['full_name', 'age', 'signup_date']
for col_name in expected:
assert col_name in multi_params_df.columns
assert multi_params_df['full_name'].equals(col_name_df['full_name'])
assert multi_params_df['full_name'].equals(dt.describe()['full_name'])
def test_value_counts(categorical_df):
logical_types = {
'ints': Integer,
'categories1': Categorical,
'bools': Boolean,
'categories2': Categorical,
'categories3': Categorical,
}
dt = DataTable(categorical_df, logical_types=logical_types)
val_cts = dt.value_counts()
for col in dt.columns:
if col in ['ints', 'bools']:
assert col not in val_cts
else:
assert col in val_cts
none_val = np.nan
expected_cat1 = [{'value': 200, 'count': 4}, {'value': 100, 'count': 3}, {'value': 1, 'count': 2}, {'value': 3, 'count': 1}]
# Koalas converts numeric categories to strings, so we need to update the expected values for this
# Koalas will result in `None` instead of `np.nan` in categorical columns
if ks and isinstance(categorical_df, ks.DataFrame):
updated_results = []
for items in expected_cat1:
updated_results.append({k: (str(v) if k == 'value' else v) for k, v in items.items()})
expected_cat1 = updated_results
none_val = 'None'
assert val_cts['categories1'] == expected_cat1
assert val_cts['categories2'] == [{'value': none_val, 'count': 6}, {'value': 'test', 'count': 3}, {'value': 'test2', 'count': 1}]
assert val_cts['categories3'] == [{'value': none_val, 'count': 7}, {'value': 'test', 'count': 3}]
val_cts_descending = dt.value_counts(ascending=True)
for col, vals in val_cts_descending.items():
for i in range(len(vals)):
assert vals[i]['count'] == val_cts[col][-i - 1]['count']
val_cts_dropna = dt.value_counts(dropna=True)
assert val_cts_dropna['categories3'] == [{'value': 'test', 'count': 3}]
val_cts_2 = dt.value_counts(top_n=2)
for col in val_cts_2:
assert len(val_cts_2[col]) == 2
def test_datatable_replace_nans_for_mutual_info():
df_nans = pd.DataFrame({
'ints': pd.Series([2, pd.NA, 5, 2], dtype='Int64'),
'floats': pd.Series([3.3, None, 2.3, 1.3]),
'bools': pd.Series([True, None, True, False]),
'int_to_cat_nan': pd.Series([1, np.nan, 3, 1], dtype='category'),
'str': | pd.Series(['test', np.nan, 'test2', 'test']) | pandas.Series |
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value as ``(start, end, mean, std)``.
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
- a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value as ``(start, end, mean, std)``.
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
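# Worked example of the expected interval values above (illustrative; derived from the expected
# values in these tests rather than from the transformer source): each category receives a slice
# of [0, 1] proportional to its frequency. 'foo' appears 3 times out of 6, so its interval starts
# at 0 and ends at 3/6 = 0.5, its mean is (0 + 0.5) / 2 = 0.25, and the fourth element equals
# width / 6 = 0.5 / 6, consistent with its use as the scale passed to norm.rvs in the fuzzy tests.
#
#   >>> 3 / 6, (0 + 0.5) / 2, 0.5 / 6
#   (0.5, 0.25, 0.08333333333333333)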
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Ouptut:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
"""Test the ``_get_category_from_start`` method.
Setup:
- instantiate a ``CategoricalTransformer``, and set the attribute ``starts``
to a pandas dataframe with ``set_index`` as ``'start'``.
Input:
- an integer, an index from data.
Output:
- a category from the data.
"""
# Setup
transformer = CategoricalTransformer()
transformer.starts = pd.DataFrame({
'start': [0.0, 0.5, 0.7],
'category': ['a', 'b', 'c']
}).set_index('start')
# Run
category = transformer._get_category_from_start(2)
# Assert
assert category == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test___init__(self):
"""Test the ``__init__`` method.
Validate that the passed arguments are stored as attributes.
Input:
- a string passed to the ``error_on_unknown`` parameter.
Side effect:
- the ``error_on_unknown`` attribute is set to the passed string.
"""
# Run
transformer = OneHotEncodingTransformer(error_on_unknown='error_value')
# Asserts
assert transformer.error_on_unknown == 'error_value'
def test__prepare_data_empty_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[], [], []]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_get_output_types(self):
"""Test the ``get_output_types`` method.
Validate that the ``_add_prefix`` method is properly applied to the ``output_types``
dictionary. For this class, the ``output_types`` dictionary is described as:
{
'value0': 'float',
'value1': 'float',
...
}
The number of items in the dictionary is defined by the ``dummies`` attribute.
Setup:
- initialize a ``OneHotEncodingTransformer`` and set:
- the ``dummies`` attribute to a list.
- the ``column_prefix`` attribute to a string.
Output:
- the ``output_types`` dictionary, but with ``self.column_prefix``
added to the beginning of the keys of the ``output_types`` dictionary.
"""
# Setup
transformer = OneHotEncodingTransformer()
transformer.column_prefix = 'abc'
transformer.dummies = [1, 2]
# Run
output = transformer.get_output_types()
# Assert
expected = {
'abc.value0': 'float',
'abc.value1': 'float'
}
assert output == expected
def test__fit_dummies_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` does not
contain nans.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c'])
def test__fit_dummies_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` contain ``np.nan``.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c', np.nan])
def test__fit_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b', 'c'])
assert ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_no_nans_numeric(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
np.testing.assert_array_equal(ohet._uniques, [1, 2, 3])
assert not ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_nans(self):
"""Test the ``_fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
and NA should be activated.
Input:
        - Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', np.nan])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b'])
assert ohet._dummy_encoded
assert ohet._dummy_na
def test__fit_nans_numeric(self):
"""Test the ``_fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated and NA activated.
Input:
        - Series containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, np.nan])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, np.nan])
np.testing.assert_array_equal(ohet._uniques, [1, 2])
assert not ohet._dummy_encoded
assert ohet._dummy_na
def test__fit_single(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a'])
def test__transform_no_nan(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation.
Input:
- Series with values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._uniques = ['a', 'b', 'c']
ohet._num_dummies = 3
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nan_categorical(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._uniques = ['a', 'b', 'c']
ohet._indexer = [0, 1, 2]
ohet._num_dummies = 3
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_encoded(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation. Null
values should be represented by the same encoding.
Input:
- Series with values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet._uniques = ['a', 'b']
ohet._dummy_na = True
ohet._num_dummies = 2
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_categorical(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation using
the categorical branch. Null values should be
represented by the same encoding.
Input:
- Series with categorical values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
        data = pd.Series([np.nan, None, 'a', 'b'])
import pandas as pd
import numpy as np
import os
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
def load_data(f):
'''Load a dataset for analysis.
Args:
f (str): absolute file path for Excel, .csv, or .txt data file
Returns:
df (pd.DataFrame): MultiIndex dataframe (index and columns are both MultiIndexes)
Raises:
ValueError: Error is raised when the target file formatting does not match what is required by TRANSPIRE for proper analysis.
'''
f_types = {'csv': ',', 'txt': '\t', 'xlsx': ''}
f_type = f.split('.')[-1]
if not f_type in f_types:
raise ValueError('File type must be .csv, .txt (tab-separated), or excel')
if f_type == 'xlsx':
df = pd.read_excel(f, header=[0, 1])
else:
df = pd.read_csv(f, header=[0, 1], sep=f_types[f_type])
if not all([s in df.iloc[0, :].astype(str).str.lower().values for s in ['accession', 'gene name']]):
raise ValueError('Dataframe is not properly formatted.')
idx_cols = np.where(df.iloc[0, :].astype(str).str.lower().isin(['accession', 'gene name', 'localization']))[0]
if f_type == 'xlsx':
df = pd.read_excel(f, header=[0, 1], index_col = idx_cols.tolist())
else:
df = pd.read_csv(f, index_col = idx_cols.tolist(), header=[0, 1], sep=f_types[f_type])
try:
df.index.names = [s.lower() for s in df.index.names]
df.columns.names = [s.lower() for s in df.columns.names]
except AttributeError as _:
raise ValueError('Dataframe index or column names are improperly formatted')
if not all([s in df.index.names for s in ['accession', 'gene name']])&all([s in df.columns.names for s in ['condition', 'fraction']]):
raise ValueError('Dataframe is not properly formatted. Check index and column name spelling and structure')
return df
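# Minimal sketch (not part of the original module) of the frame layout that
# `load_data` returns: a ('condition', 'fraction') column MultiIndex over an
# ('accession', 'gene name') row MultiIndex. The condition/fraction labels and
# accession numbers below are made-up placeholders.
def _example_profile_frame():
    columns = pd.MultiIndex.from_product(
        [['control', 'infected'], ['F01', 'F02']], names=['condition', 'fraction']
    )
    index = pd.MultiIndex.from_tuples(
        [('P12345', 'GENE1'), ('Q67890', 'GENE2')], names=['accession', 'gene name']
    )
    return pd.DataFrame(np.random.rand(2, 4), index=index, columns=columns)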
def add_markers(df_, markers_):
'''Append organelle marker localization information to a dataframe.
Args:
df_ (pd.DataFrame): Pandas dataframe formatted for TRANSPIRE analysis
markers_(Union(str, pd.DataFrame)): String referring to an organelle marker set in external data or a custom set of markers loaded as a pd.DataFrame or pd.Series with an "accession" and "localization" column specifying organelle marker Uniprot accession numbers and their corresponding subcellular localization.
Returns:
df(pd.DataFrame): a copy of the original input dataframe with organelle localizations appended as an additional index level
'''
if isinstance(markers_, str):
markers_ = load_organelle_markers(markers_)
elif isinstance(markers_, pd.Series) or isinstance(markers_, pd.DataFrame):
markers_ = load_organelle_markers('custom', df=markers_)
else:
raise ValueError()
df = df_.copy()
if 'localization' in df.index.names:
raise ValueError('Index level "localization" already exists. If wanting to over-write these labels, remove them from the dataframe using df.reset_index("localization", drop=True)')
df['localization'] = markers_.reindex(df.index, level='accession')
return df.reset_index().set_index(df.index.names+['localization'])
def load_organelle_markers(marker_set_name, df=None):
'''Load an organelle marker set from TRANSPIRE.data.external.organelle_markers
Args:
marker_set_name (str): Name of marker set to load
df (pd.DataFrame, optional): DataFrame to coerce into proper formatting for TRANSPIRE
Returns:
markers (pd.Series): Marker set loaded as a pd.Series with index and value pairs referring to protein accession number and associated subcellular localization
Raises:
ValueError: If marker_set_name is not a valid marker set in TRANSPIRE.data.external.organelle_markers
'''
if not isinstance(marker_set_name, str):
raise ValueError("marker_set_name must be a string")
if marker_set_name == 'custom':
df = df.reset_index().copy()
df.columns = [n.lower() for n in df.columns]
if 'accession' in df.columns:
df = df.set_index('accession')
else:
raise ValueError('Marker dataframe does not have an "accession" column.')
if 'localization' in df.columns:
return df['localization'].squeeze()
else:
raise ValueError('Marker dataframe does not have a "localization" column.')
elif marker_set_name in [f.split('.')[0] for f in os.listdir(os.path.join(THIS_DIR, 'external', 'organelle_markers'))]:
return pd.read_csv(os.path.join(THIS_DIR, 'external', 'organelle_markers', '{}.csv'.format(marker_set_name)), header=0, index_col=0).squeeze()
else:
raise ValueError('{} is not a valid marker set name'.format(marker_set_name))
def load_predictions(f):
'''Load TRANSPIRE predictions from a filepath
Args:
f (str): valid filepath to .csv or .zip file
Returns:
df (pd.DataFrame): DataFrame loaded from filepath
'''
df = pd.read_csv(f, header=[0], index_col=[0, 1, 2, 3, 4, 5, 6, 7])
assert(all([i in ['accession_A', 'accession_B', 'gene name_A', 'gene name_B', 'condition_A', 'condition_B', 'localization_A', 'localization_B'] for i in df.index.names]))
return df
def load_CORUM():
'''Load core CORUM complexes
Args:
None
Returns:
corum (pd.DataFrame): DataFrame representation of CORUM core complexes information
prot_to_complex (pd.Series): Series for mapping Uniprot accession numbers to their corresponding CORUM complex IDs
complex_to_prot (pd.Series): Series for mapping CORUM complex IDs the corresponding Uniprot accession numbers of their subunits
'''
corum = pd.read_csv(os.path.join(THIS_DIR, 'external', 'coreComplexes.txt'), sep='\t', index_col=0)
prot_to_complex = {}
complex_to_prot = {}
for complex_num, accs in zip(corum.index, corum['subunits(UniProt IDs)'].str.split(';')):
for acc in accs:
if not acc in prot_to_complex:
prot_to_complex[acc] = [complex_num]
else:
prot_to_complex[acc].append(complex_num)
if not complex_num in complex_to_prot:
complex_to_prot[complex_num] = [acc]
else:
complex_to_prot[complex_num].append(acc)
    prot_to_complex = pd.Series(prot_to_complex)
    complex_to_prot = pd.Series(complex_to_prot)
    return corum, prot_to_complex, complex_to_prot
#------------------------
#
# NAME: Google_trends_scraper.py
#
# CREATED: 01.09.2022 - dserke
#
# EDITS:
#
# SOURCE: https://github.com/GeneralMills/pytrends#historical-hourly-interest
# https://lazarinastoy.com/the-ultimate-guide-to-pytrends-google-trends-api-with-python/
#
# NOTES: 1. Search term and topics are two different things
# 2. keyword length limited to 100 characters
# 3. max number of kw values at one time is 5
#------------------------
#------------------------
# import libraries
#------------------------
from pytrends.request import TrendReq
import matplotlib as mpl
from matplotlib import pyplot as plt
import pandas as pd
#------------------------
# define constants
#------------------------
tz_offset = 360
kw_list = ['bitcoin', 'ethereum']
home_lang = 'en-US'
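# Illustrative helper (not part of the original script): as noted above, the
# maximum number of kw values per payload is 5, so longer keyword lists have to
# be split into batches before calling build_payload().
def chunk_keywords(keywords, batch_size=5):
    """Yield successive batches of at most `batch_size` keywords."""
    for i in range(0, len(keywords), batch_size):
        yield keywords[i:i + batch_size]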
#------------------------
# data
#------------------------
# connect to Google, create model
pytrends = TrendReq(hl=home_lang, tz=tz_offset)
# query the keyword term to search for
pytrends.build_payload(kw_list, cat=0, timeframe='today 5-y', geo='', gprop='')
test_df = pytrends.interest_over_time()
kw_btc_df = pytrends.get_historical_interest(kw_list, year_start=2020, month_start=1, day_start=1, hour_start=0, year_end=2021, month_end=12, day_end=30, hour_end=0, cat=0, geo='', gprop='', sleep=0)
# Interest by Region
region_df = pytrends.interest_by_region(resolution='COUNTRY', inc_low_vol=True, inc_geo_code=False)
# ... looking at rows where all values are not equal to 0
region_df = region_df[(region_df != 0).all(1)]
# ... drop all rows that have null values in all columns
region_df.dropna(how='all',axis=0, inplace=True)
# trending searches in real time for United States
pytrends.trending_searches(pn='united_states')
# trending searches in real time for Japan
pytrends.trending_searches(pn='japan')
# get today's trending topics
trendingtoday = pytrends.today_searches(pn='US')
trendingtoday.head(20)
# Get Google Top Charts for YYYY
top_df = pytrends.top_charts(2021, hl=home_lang, tz=tz_offset, geo='GLOBAL')
top_df
kw_sugg_df = pytrends.suggestions(keyword='bitcoin')
pytrends.categories()
pytrends.related_topics()
# get related queries
related_queries = pytrends.related_queries()
related_queries.values()
# ...build lists dataframes
top = list(related_queries.values())[0]['top']
rising = list(related_queries.values())[0]['rising']
# ... convert lists to dataframes
dftop = pd.DataFrame(top)
dfrising = pd.DataFrame(rising)
# ... join two data frames
joindfs = [dftop, dfrising]
allqueries = pd.concat(joindfs, axis=1)
# ... function to change duplicates
cols = pd.Series(allqueries.columns)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.style as style
from sklearn.base import BaseEstimator
from ..utils import (
is_factor,
numerical_gradient,
numerical_gradient_jackknife,
numerical_gradient_gaussian,
numerical_interactions,
numerical_interactions_jackknife,
numerical_interactions_gaussian,
Progbar,
score_regression,
score_classification,
)
class Explainer(BaseEstimator):
"""Class Explainer: effects of features on the response.
Attributes:
obj: an object;
fitted object containing methods `fit` and `predict`
n_jobs: an integer;
number of jobs for parallel computing
y_class: an integer;
class whose probability has to be explained (for classification only)
normalize: a boolean;
whether the features must be normalized or not (changes the effects)
"""
def __init__(self, obj, n_jobs=None, y_class=0, normalize=False):
self.obj = obj
self.n_jobs = n_jobs
self.y_mean_ = None
self.effects_ = None
self.residuals_ = None
self.r_squared_ = None
self.adj_r_squared_ = None
self.effects_ = None
self.ci_ = None
self.ci_inters_ = {}
self.type_fit = None
self.y_class = y_class # classification only
self.normalize = normalize
self.type_ci = None
def fit(
self,
X,
y,
X_names,
method="avg",
type_ci="jackknife",
scoring=None,
level=95,
col_inters=None,
):
"""Fit the explainer's attribute `obj` to training data (X, y).
Args:
X: array-like, shape = [n_samples, n_features];
Training vectors, where n_samples is the number
of samples and n_features is the number of features.
y: array-like, shape = [n_samples, ]; Target values.
X_names: {array-like}, shape = [n_features, ];
Column names (strings) for training vectors.
method: str;
Type of summary requested for effects. Either `avg`
(for average effects), `inters` (for interactions)
or `ci` (for effects including confidence intervals
around them).
type_ci: str;
Type of resampling for `method == 'ci'` (confidence
intervals around effects). Either `jackknife`
                bootstrapping or `gaussian` (gaussian white noise with
standard deviation equal to `0.01` applied to the
features).
scoring: str;
measure of errors must be in ("explained_variance",
"neg_mean_absolute_error", "neg_mean_squared_error",
"neg_mean_squared_log_error", "neg_median_absolute_error",
"r2", "rmse") (default: "rmse").
level: int; Level of confidence required for
`method == 'ci'` (in %).
col_inters: str; Name of column for computing interactions.
"""
assert method in (
"avg",
"ci",
"inters",
), "must have: `method` in ('avg', 'ci', 'inters')"
n, p = X.shape
self.X_names = X_names
self.level = level
self.method = method
self.type_ci = type_ci
if is_factor(y): # classification ---
self.n_classes = len(np.unique(y))
assert (
self.y_class <= self.n_classes
), "self.y_class must be <= number of classes"
assert hasattr(
self.obj, "predict_proba"
), "`self.obj` must be a classifier and have a method `predict_proba`"
self.type_fit = "classification"
if scoring is None:
self.scoring = "accuracy"
self.score_ = score_classification(self.obj, X, y, scoring=self.scoring)
def predict_proba(x):
return self.obj.predict_proba(x)[:, self.y_class]
y_hat = predict_proba(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
else: # is_factor(y) == False # regression ---
self.type_fit = "regression"
if scoring is None:
self.scoring = "rmse"
self.score_ = score_regression(self.obj, X, y, scoring=self.scoring)
y_hat = self.obj.predict(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
self.y_mean_ = np.mean(y)
ss_tot = np.sum((y - self.y_mean_) ** 2)
ss_reg = np.sum((y_hat - self.y_mean_) ** 2)
ss_res = np.sum((y - y_hat) ** 2)
self.residuals_ = y - y_hat
self.r_squared_ = 1 - ss_res / ss_tot
self.adj_r_squared_ = 1 - (1 - self.r_squared_) * (n - 1) / (
n - p - 1
)
# classification and regression ---
if method == "avg":
res_df = pd.DataFrame(data=self.grad_, columns=X_names)
res_df_mean = res_df.mean()
res_df_std = res_df.std()
res_df_median = res_df.median()
res_df_min = res_df.min()
res_df_max = res_df.max()
data = pd.concat(
[res_df_mean, res_df_std, res_df_median, res_df_min, res_df_max],
axis=1
)
df_effects = pd.DataFrame(
data=data.values,
columns=["mean", "std", "median", "min", "max"],
index=X_names,
)
# heterogeneity of effects
self.effects_ = df_effects.sort_values(by=["mean"], ascending=False)
return self
def summary(self):
"""Summarise results
a method in class Explainer
Args:
None
"""
assert (
(self.ci_ is not None)
| (self.effects_ is not None)
| (self.ci_inters_ is not None)
), "object not fitted, fit the object first"
if (self.ci_ is not None) & (self.method == "ci"):
# (mean_est, se_est,
# mean_est + qt*se_est, mean_est - qt*se_est,
# p_values, signif_codes)
df_mean = pd.Series(data=self.ci_[0], index=self.X_names)
df_se = pd.Series(data=self.ci_[1], index=self.X_names)
df_ubound = pd.Series(data=self.ci_[2], index=self.X_names)
df_lbound = pd.Series(data=self.ci_[3], index=self.X_names)
df_pvalue = pd.Series(data=self.ci_[4], index=self.X_names)
df_signif = pd.Series(data=self.ci_[5], index=self.X_names)
data = pd.concat(
[df_mean, df_se, df_lbound, df_ubound, df_pvalue, df_signif],
axis=1,
)
self.ci_summary_ = pd.DataFrame(
data=data.values,
columns=[
"Estimate",
"Std. Error",
str(self.level) + "% lbound",
str(self.level) + "% ubound",
"Pr(>|t|)",
"",
],
index=self.X_names,
).sort_values(by=["Estimate"], ascending=False)
print("\n")
print(f"Score ({self.scoring}): \n {np.round(self.score_, 3)}")
if self.type_fit == "regression":
print("\n")
print("Residuals: ")
self.residuals_dist_ = pd.DataFrame(
pd.Series(
data=np.quantile(
self.residuals_, q=[0, 0.25, 0.5, 0.75, 1]
),
index=["Min", "1Q", "Median", "3Q", "Max"],
)
).transpose()
print(self.residuals_dist_.to_string(index=False))
print("\n")
if self.type_ci=="jackknife":
print("Tests on marginal effects (Jackknife): ")
if self.type_ci=="gaussian":
print("Tests on marginal effects (Gaussian noise): ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.ci_summary_)
print("\n")
print(
"Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘-’ 1"
)
if self.type_fit == "regression":
print("\n")
print(
f"Multiple R-squared: {np.round(self.r_squared_, 3)}, Adjusted R-squared: {np.round(self.adj_r_squared_, 3)}"
)
if (self.effects_ is not None) & (self.method == "avg"):
print("\n")
print("Heterogeneity of marginal effects: ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.effects_)
print("\n")
if (self.ci_inters_ is not None) & (self.method == "inters"):
print("\n")
print("Interactions with " + self.col_inters + ": ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(
pd.DataFrame(
self.ci_inters_,
index=[
"Estimate",
"Std. Error",
str(95) + "% lbound",
str(95) + "% ubound",
"Pr(>|t|)",
"",
],
).transpose()
)
def plot(self, what):
"""Plot average effects, heterogeneity of effects, ...
Args:
            what: a string;
                type of plot to produce, e.g. "average_effects".
"""
assert self.effects_ is not None, "Call method 'fit' before plotting"
assert self.grad_ is not None, "Call method 'fit' before plotting"
# For method == "avg"
if (self.method == "avg"):
if(what == "average_effects"):
sns.set(style="darkgrid")
                fi = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Jan 5 09:20:37 2022
Compiles NDFD data into SQLite DB
@author: buriona,tclarkin
"""
import sys
from pathlib import Path
import pandas as pd
import sqlalchemy as sql
import sqlite3
import zipfile
from zipfile import ZipFile
# Load directories and defaults
this_dir = Path(__file__).absolute().resolve().parent
#this_dir = Path('C:/Programs/shread_dash/database/SHREAD')
ZIP_IT = False
ZIP_FRMT = zipfile.ZIP_LZMA
DEFAULT_DATE_FIELD = 'Date_Valid'
DEFAULT_CSV_DIR = Path(this_dir, 'data')
DEFAULT_DB_DIR = this_dir
COL_TYPES = {
'Date_Valid':str,'Date_Init':str,'Type':str,'Source':str,'OBJECTID':int,
'Join_Count':int,'TARGET_FID':int,'pointid':int,"grid_code":int,
'elev_ft': int, 'slope_d': int,'aspct': int, 'nlcd': int,
'LOCAL_ID': str,"POLY_SOURC":str,"TOTAL_ID":str,"TOTAL_NAME":str,
'LOCAL_NAME':str,'min':float,'max':float,'mean':float,'median':float
}
# Define functions
def get_dfs(data_dir=DEFAULT_CSV_DIR, verbose=False):
"""
Get and merge dataframes imported using shread.py
"""
mint_df_list = []
maxt_df_list = []
rhm_df_list = []
pop12_df_list = []
qpf_df_list = []
snow_df_list = []
sky_df_list = []
print('Preparing .csv files for database creation...')
for data_file in data_dir.glob('ndfd*.csv'):
if verbose:
print(f'Adding {data_file.name} to dataframe...')
df = pd.read_csv(
data_file,
usecols=COL_TYPES.keys(),
parse_dates=[DEFAULT_DATE_FIELD],
dtype=COL_TYPES
)
if not df.empty:
df = df.drop(axis=1,columns=["Source","Join_Count","TARGET_FID","pointid","grid_code","POLY_SOURC","TOTAL_ID","TOTAL_NAME","min","max","median"])
df = df.rename(columns={"Date_Valid":"Date"})
mint_df_list.append(
df[df['Type'] == 'mint'].drop(columns='Type').copy()
)
maxt_df_list.append(
df[df['Type'] == 'maxt'].drop(columns='Type').copy()
)
rhm_df_list.append(
df[df['Type'] == 'rhm'].drop(columns='Type').copy()
)
pop12_df_list.append(
df[df['Type'] == 'pop12'].drop(columns='Type').copy()
)
qpf_df_list.append(
df[df['Type'] == 'qpf'].drop(columns='Type').copy()
)
snow_df_list.append(
df[df['Type'] == 'snow'].drop(columns='Type').copy()
)
sky_df_list.append(
df[df['Type'] == 'sky'].drop(columns='Type').copy()
)
df_mint = pd.concat(mint_df_list)
df_mint.name = 'mint'
df_maxt = pd.concat(maxt_df_list)
df_maxt.name = 'maxt'
df_rhm = pd.concat(rhm_df_list)
df_rhm.name = 'rhm'
df_pop12 = pd.concat(pop12_df_list)
df_pop12.name = 'pop12'
df_qpf = pd.concat(qpf_df_list)
df_qpf.name = 'qpf'
df_snow = pd.concat(snow_df_list)
df_snow.name = 'snow'
    df_sky = pd.concat(sky_df_list)
    df_sky.name = 'sky'
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 26 17:36:30 2020
@author: <NAME>
Explanation: How to move rows from one dataframe to another with pandas and
and substract the title's name.
"""
import pandas as pd
df = pd.read_csv("train.csv")
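# Illustrative sketch (not the original continuation of this script): assuming a
# Titanic-style 'Name' column in train.csv, extract the honorific title, then move
# the matching rows into a second dataframe and drop them from the source.
df['Title'] = df['Name'].str.extract(r',\s*([^\.]+)\.', expand=False)
mask = df['Title'] == 'Rev'
df_moved = df[mask].copy()              # rows transferred to the new dataframe
df = df[~mask].reset_index(drop=True)   # remove the moved rows from the original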
#
# Author: <NAME>
# Strategy: I want to read the submissions and comments of r/pennystocks and count the number
# of times a particular stock symbol appears in either the submission title,
# the submission body, or any of the comment bodies.
# Observables: # of times in submission title / day
# # of times in submission body / day
# # of times in any comment body / day
# # of times in a comment body whose submission's body or title contains stock symbol / day
from re import search
import praw
from datetime import datetime as dt
from datetime import timedelta, date
import matplotlib
import pandas as pd
import numpy as np
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import sys
from dateutil.parser import parse
import yahooquery as yq
from english_words import english_words_set as ew
import pickle
import os
# Utility Functions
def handle_cl_args():
stk_symb = "SXTC"
if(len(sys.argv)>1):
stk_symb = sys.argv[1]
print(f"Looking for occurences of {stk_symb}")
return stk_symb,
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
def get_start_date(all_occurences):
all_occurences_keys = [list(x.keys()) for x in all_occurences]
for x in all_occurences_keys:
print(x)
all_mins = [dt.now().date().__str__() if x==[] else min(x) for x in all_occurences_keys]
print(all_mins)
return min( all_mins )
# Reddit functions
def count_occurences(stk_symb):
n_sub_title, n_sub_body, n_any_com_body, n_sub_title_com_body, n_sub_body_com_body={}, {}, {}, {}, {}
query_result = reddit.subreddit("pennystocks").search(stk_symb, sort='new', limit=1000, time_filter="month")
i=0
for sub in query_result: # search through all posts in r/all for stk_symb
i+=1
if(i%10==0):
print(f"\rProcessing post {i}", end="")
sub_date = dt.fromtimestamp(sub.created_utc) # record date
comments = sub.comments # get comments tree
comments.replace_more(limit=0) # Exapand comment tree completely
if (stk_symb in sub.title): # if stk_symb in the post's title
n_sub_title[sub_date.date().__str__()] = 1 if sub_date.date().__str__() not in n_sub_title else n_sub_title[sub_date.date().__str__()] + 1 # Add 1 to the day of post or create day in dict and set to 1
for com in comments: # Check the comments for stk_symb
if(stk_symb in com.body):
n_sub_title_com_body[sub_date.date().__str__()] = 1 if sub_date.date().__str__() not in n_sub_title_com_body else n_sub_title_com_body[sub_date.date().__str__()] + 1
if (stk_symb in sub.selftext): # if stk_symb in post's body
n_sub_body[sub_date.date().__str__()] = 1 if sub_date.date().__str__() not in n_sub_body else n_sub_body[sub_date.date().__str__()] + 1
for com in comments:
if(stk_symb in com.body):
n_sub_body_com_body[sub_date.date().__str__()] = 1 if sub_date.date().__str__() not in n_sub_body_com_body else n_sub_body_com_body[sub_date.date().__str__()] + 1
print("")
return n_sub_title, n_sub_body, n_sub_title_com_body, n_sub_body_com_body, n_any_com_body
def plot_reddit_occurences_price(stk_symb, search_term):
print(f"\n\n\n\n\n\nCounting occurences of {search_term}")
# count the occurences of our stock symbol in r/all and load the into dictionaries
# {key:value} = {day:frequency}
n_sub_title, n_sub_body, n_sub_title_com_body, n_sub_body_com_body, n_any_com_body = count_occurences(search_term)
# Let's consolidate these into a big tuple so we can get the earliest mention of a stock symbol concisely
all_occurences = (n_sub_title, n_sub_body, n_sub_title_com_body, n_sub_body_com_body, n_any_com_body)
start_date = get_start_date(all_occurences) # get the earliest mention
end_date = dt.now().date().__str__()# max(max(n_sub_body.keys()), max(n_sub_title.keys())) # I like to set the end date to today rather than the last mention so that I can see the longer term behaviour of the price
print(start_date)
print(end_date)
stk = yq.Ticker(stk_symb, status_forcelist=[404, 429, 500, 502, 503, 504])
print("Retrieving Stock History")
price_history = stk.history(start=start_date, end=end_date)
print(price_history)
mean_close = np.mean(price_history['close'])
dates = [d for d in daterange(parse(start_date), parse(end_date))]
frequency_imputer = lambda occurence_dict, dates: [0 if (d.date().__str__() not in occurence_dict.keys()) else occurence_dict[d.date().__str__()] for d in dates]
body_plot = frequency_imputer(n_sub_body, dates)
title_plot = frequency_imputer(n_sub_title, dates)
title_com_plot =frequency_imputer( n_sub_title_com_body, dates)
body_com_plot = frequency_imputer(n_sub_body_com_body, dates)
price_imputer = lambda history, dates: [None if (d.date() not in history.index.get_level_values("date").to_list()) else history.loc[history.index.get_level_values('date')==d.date(), 'close'] for d in dates]
price_history_plot = price_imputer(price_history, dates)
plt.figure(figsize=(15,5))
plt.subplot(2,1,1)
plt.grid(which='major', axis='x')
plt.title(f"Frequency of {stk_symb} in r/pennystocks")
plt.plot(range(len(dates)),body_plot, label="Occurences in body of submission")
plt.plot(range(len(dates)),title_plot, label="Occurences in title of submission")
plt.plot(range(len(dates)),body_com_plot, label="Occurences in body of comments if in submission title")
plt.plot(range(len(dates)),title_com_plot, label="Occurences in body of comments if in submission body")
plt.legend()
plt.xticks([])
plt.subplot(2,1,2)
plt.grid(which='major', axis='x')
plt.title(f"Closing Price of {stk_symb}")
plt.plot(range(len(dates)),price_history_plot, label="Closing Price")
# plt.plot(range(len(dates)),np.ones(len(dates))*mean_close, label="Mean Close")
plt.xticks(ticks=range(len(dates)), labels=list(map(lambda x: x.date(),dates)), rotation=70)
# plt.grid()
plt.legend()
plt.tight_layout()
plt.show()
def get_stocks_mentioned():
all_stocks = pd.read_csv("nasdaq.csv") # Read all stocks from csv
reddit_stocks=all_stocks
earliest_dates = []
num_mentions = []
for stock in all_stocks["Symbol"]:
earliest_date = 999999999999
search_term = stock
if stock in ew:
search_term = all_stocks.loc[all_stocks['Symbol']==stock]["Name"].iloc[0]
print("The stock symbol is a standard english word, searching for company name instead!")
mentions=0
print(f"Searching for {search_term}")
query_result = reddit.subreddit("pennystocks").search(search_term, sort='new', limit=100, time_filter="month")
for sub in query_result:
if(stock in sub.title or stock in sub.selftext):
mentions+=1
if(sub.created_utc<earliest_date):
earliest_date=sub.created_utc
print(f"{stock} has {mentions} mentions.")
if mentions<1 or mentions>40:
print(f"Throwing away {stock}")
reddit_stocks = reddit_stocks[reddit_stocks["Symbol"]!=stock]
else:
print("Adding date and mentions to stock")
earliest_dates.append(earliest_date)
num_mentions.append(mentions)
reddit_stocks["Earliest Date"] = | pd.Series(earliest_dates) | pandas.Series |
# Spectral_Analysis_Amp_and_Phase.py
import os
import numpy as np
import pandas as pd
import scipy.linalg as la
import matplotlib.pyplot as plt
# Import time from the data or define it
t = np.arange(0.015, 0.021, 10**-7)
dt = 10**-7
# Define trainsize and number of modes
trainsize = 20000 # Number of snapshots used as training data.
num_modes = 44 # Number of POD modes.
reg = 0 # Just an input in case we regularize DMDc.
# Locate the full data of snapshots FOM and ROMs (INPUT)
Folder_name_data = 'C:\\Users\\Admin\\Desktop\\combustion\\'
file_name_FOM = 'traces_gems_60k_final.npy'
file_name_ROM_DMDc = 'traces_rom_DMDc_rsvd.npy'
file_name_ROM_cubic_r25 = 'traces_rom_cubic_tripple_reg_r25.npy'
file_name_ROM_cubic_r44 = 'traces_rom_cubic_r44.npy'
file_name_ROM_Quad_r44 = 'traces_rom_60k_100_30000.npy'
# Define output file location and file names to identify phase and amplitudes (OUTPUT)
folder_name = "C:\\Users\\Admin\\Desktop\\combustion\\spectral\\Final_plots\\"
Amp_name = folder_name + "\\" + "Amp" # Amplitude plots
Phase_name = folder_name + "\\" + "Phase" # Phase plots
# Load the data
FOM_ = np.load(Folder_name_data + file_name_FOM)
ROM_DMDc = np.load(Folder_name_data + file_name_ROM_DMDc)
ROM_cubic_r25 = np.load(Folder_name_data + file_name_ROM_cubic_r25)
ROM_cubic_r44 = np.load(Folder_name_data + file_name_ROM_cubic_r44)
ROM_Quad_r44 = np.load(Folder_name_data + file_name_ROM_Quad_r44)
# Plotting adjustments
End_plot_at = 60000 # 59990 # 40000
freq_limit_to_plot = 15000
# =============================================================================
def lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit, savefile):
"""Plots for comparision of data in time. Check the saved data in
folder_name.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
savefile
Suffix to save the file name
"""
print("Time series plots")
plt.xlim([0.015, 0.021]) # set axis limits
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid', c='k')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed', c='#ff7f0e')
# plt.plot(t[0:End_plot_at],
# pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
# label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF', linestyle='dashed', c='b')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot', c='r')
plt.xlabel('time')
plt.ylabel(unit)
plt.axvline(x=t[0] + trainsize*dt, color='black')
plt.legend()
fname = f"{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{savefile}.pdf"
plt.savefig(os.path.join(folder_name, fname),
bbox_inches="tight", dpi=200)
plt.show()
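# Illustrative sketch (not part of the original script): the `datanumber` offsets
# documented in the docstring above can be collected in a dict and looped over to
# produce one figure per state variable; the velocity/species unit strings are
# placeholders.
variable_offsets = {
    'Pressure': (-12, 'Pa'),
    'Vx': (-8, 'm/s'),
    'Vy': (-4, 'm/s'),
    'Temperature': (0, 'K'),
    'CH4': (8, 'molar concentration'),
    'O2': (12, 'molar concentration'),
    'H2O': (16, 'molar concentration'),
    'CO2': (20, 'molar concentration'),
}
# for name, (offset, unit) in variable_offsets.items():
#     lineplots_timeseries(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44,
#                          ROM_DMDc, offset, unit, savefile='_' + name)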
def L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit):
"""Plot L2 norm error comparision between all the ROMs.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
"""
print("L2 norm error plot")
e_ROM_Quad_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r25 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_DMDc = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
             pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
             label='C-OPINF_r44', linestyle='dashed')
####
#
# The MIT License (MIT)
#
# Copyright 2021, 2022 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import logging
from matplotlib import cm
from copy import deepcopy
from typing import List, Optional, Union
from scipy.stats import wilcoxon, ttest_rel
# ================
# Setup the Logger
LOGGER = logging.getLogger("plotting_utils")
LOGGER.setLevel(logging.INFO)
LOGGER.propagate = False
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
FORMATTER = logging.Formatter('[%(levelname)s] %(name)s : %(message)s')
CH.setFormatter(FORMATTER)
LOGGER.addHandler(CH)
# ================
def _get_topk(x, k, method):
"""
Task: Pandas aggregation function to compute the top-k acc.
"""
out = 0.0
if method == "average":
for xi in x:
out += (np.mean(xi) <= k)
elif method == "csi":
for xi in x:
y = np.arange(xi[0], xi[1] + 1)
for yi in y:
if yi <= k:
out += (1.0 / len(y))
else:
raise ValueError("Invalid method: '%s'" % method)
# Get accuracy as percentages
out /= len(x)
out *= 100
return out
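# Worked illustration (hypothetical numbers) of the 'csi' accounting above: each
# rank interval (lo, hi) spreads one unit of credit uniformly over its tied ranks.
# For k = 3 and x = [(1, 1), (2, 4), (5, 6)]:
#   (1, 1) -> ranks {1},       all <= 3          -> contributes 1
#   (2, 4) -> ranks {2, 3, 4}, two of three <= 3 -> contributes 2/3
#   (5, 6) -> ranks {5, 6},    none <= 3         -> contributes 0
# so _get_topk([(1, 1), (2, 4), (5, 6)], 3, "csi") == (1 + 2/3) / 3 * 100 ~= 55.6 (%).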
def plot__02__b(
results: pd.DataFrame, ks: Optional[Union[List[int], int]] = None, min_class_support: int = 50,
sharey: str = "all", n_samples: int = 50, topk_method: str = "csi", fig_width: int = 6, fig_height: int = 4,
label_rot_angle: float = 90
):
"""
Bar plots indicating the top-k improvements per class in the PubChemLite classification based on PubChem's TOC.
:param results: pd.DataFrame, containing the Only-MS2 and LC-MS2Struct ranks, PubChemLite classes, and further
information for the spectra in our experiments. Please check, "gather_ranl_changes__csi.py" for the details on
the data-structure. The results for different MS2-scorers are concatenated.
:param ks: scalar or list of scalars, k for which the top-k ranking performance improvements should be analysed.
:param min_class_support: scalar, minimum number of unique molecular structures per PubChemLite class.
:param sharey: string or boolean, indicating whether (and how) the y-axes ranges are synchronized.
:param n_samples: scalar, number of random samples to estimate the top-k accuracy from.
:param topk_method: deprecated
:param fig_width: scalar, width of the figure
:param fig_height: scalar, height of the figure
:param label_rot_angle: scalar, rotation angle of the x-labels
:return:
"""
assert topk_method == "csi", "Only CSI:FingerID top-k accuracy computation is supported."
# Make a deep copy of the input data, e.g. to allow modifications
results = deepcopy(results)
# Get relevant subset
pl_columns = [s for s in results.columns.tolist() if s.startswith("pubchemlite")]
info_columns = [
"correct_structure", "molecule_identifier", "rank_onlyms", "rank_msplrt", "n_cand", "n_isomers", "ms2scorer"
]
results = results \
.filter(items=pl_columns + info_columns, axis=1) \
.rename(mapper={c: c.split("_")[1] for c in pl_columns}, axis=1)
# --- Columns in the subplot ---
# k for the top-k that are plotted as columns in the subplots
if ks is None:
ks = [1, 20]
elif isinstance(ks, int):
ks = [ks]
else:
assert isinstance(ks, list)
n_k = len(ks)
# --- Rows in the subplot correspond to the MS2 scoring methods in the input data ---
if "ms2scorer" not in results.columns:
results = results.assign(ms2scorer="MS$^2$ Scorer")
l_ms2scorer = [ms2scorer for ms2scorer, _ in results.groupby("ms2scorer")]
d_row2scorer = {s: i for i, s in enumerate(l_ms2scorer)}
n_scorer = len(l_ms2scorer)
# Create the Axes-array for plotting
fig, axrr = plt.subplots(
n_scorer, n_k, figsize=(fig_width * n_k, fig_height * n_scorer), squeeze=False, sharey=sharey, sharex="all"
)
# Plot
results_out = []
for ms2scorer, res_sub in results.groupby("ms2scorer"):
for ax_col_idx, k in enumerate(ks):
# Get the axis to draw in
ax = axrr[d_row2scorer[ms2scorer], ax_col_idx]
_res_sub = []
for rep in range(n_samples):
_res = res_sub \
.sample(frac=1, random_state=rep) \
.drop_duplicates("correct_structure") \
.melt(info_columns, var_name="pubchemlite_class", value_name="membership_count")
# We can drop the rows where a molecule is not member of a particular class
_res = _res[_res["membership_count"] > 0] # type: pd.DataFrame
# Compute the top-k accuracies for Only MS and MS + RT
_res = _res \
.groupby("pubchemlite_class") \
.agg({
"rank_onlyms": lambda x: _get_topk(x, k, topk_method),
"rank_msplrt": lambda x: _get_topk(x, k, topk_method),
"n_cand": np.median,
"n_isomers": np.median,
"molecule_identifier": len
}) \
.rename({
"rank_onlyms": "top_k_p_onlyms",
"rank_msplrt": "top_k_p_msplrt",
"molecule_identifier": "n_class_support"
}, axis=1) \
.reset_index()
_res_sub.append(_res)
_res_sub = pd.concat(_res_sub, ignore_index=True)
# Add the top-k improvement in percentage-points
_res_sub = _res_sub.assign(top_k_p_improvement=(_res_sub["top_k_p_msplrt"] - _res_sub["top_k_p_onlyms"]))
# Filter classes without enough support
_res_sub = _res_sub[_res_sub["n_class_support"] >= min_class_support]
if len(_res_sub) == 0:
raise ValueError("No class has enough support.")
sns.barplot(
data=_res_sub, x="pubchemlite_class", y="top_k_p_improvement", ax=ax
)
ax.grid(axis="y")
ax.hlines(0, ax.get_xlim()[0] - 1, ax.get_xlim()[1] + 1, color='k', linestyle="--")
ax.set_title("%s - top-%d" % (ms2scorer, k), fontweight="bold")
ax.bar_label(
ax.containers[0],
labels=[
"%.1f" % _l
for _l in _res_sub.groupby("pubchemlite_class")["top_k_p_onlyms"].mean().tolist()
],
rotation=90, horizontalalignment="center", fmt="%.1f", label_type="edge", padding=10, fontsize=12
)
if d_row2scorer[ms2scorer] == (n_scorer - 1):
ax.set_xticklabels(
[
plt.Text(
_tl.get_position()[0], _tl.get_position()[1],
"%s (n=%d)" %
(
_tl.get_text(),
_res_sub[_res_sub["pubchemlite_class"] == _tl.get_text()]["n_class_support"].iloc[0]
)
)
for _tl in ax.get_xticklabels()
],
rotation=label_rot_angle, horizontalalignment="center", fontsize=12
)
ax.set_xlabel("PubChemLite classification", fontsize=12)
else:
ax.set_xlabel("")
if ax_col_idx == 0:
ax.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
ax.set_ylabel("")
results_out.append(
_res_sub
.groupby("pubchemlite_class")
.agg({
"top_k_p_onlyms": np.mean,
"top_k_p_msplrt": np.mean,
"top_k_p_improvement": np.mean,
"n_cand": lambda x: x.iloc[0],
"n_isomers": lambda x: x.iloc[0],
"n_class_support": lambda x: x.iloc[0],
})
.assign(k=k, ms2scorer=ms2scorer)
.reset_index()
)
# Compute the average improvement into actual counts
results_out[-1]["improvement_in_n"] = \
(results_out[-1]["n_class_support"] * results_out[-1]["top_k_p_improvement"]) / 100
# Adjust y-axis range to provide enough space for the labels
_y_add = {1: 1.0, 5: 0.5, 20: 1.75}
for ax_col_idx, _k in enumerate(ks):
for ax in axrr[:, ax_col_idx]:
_y_min, _y_max = ax.get_ylim()
ax.set_ylim(_y_min - _y_add.get(_k, 0.0), _y_max)
plt.tight_layout()
return pd.concat(results_out, ignore_index=True)
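# Hypothetical usage sketch (the variable `ranking_results` is a placeholder, not
# defined in this module): `plot__02__b` draws the per-class bar plots and returns
# the aggregated per-class top-k summary table shown in the figure.
#
#     summary = plot__02__b(ranking_results, ks=[1, 20], min_class_support=50)
#     plt.savefig("topk_improvement__pubchemlite.pdf", bbox_inches="tight")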
# Same color-map as used in the supplementary material when plotting the classyfire class distribution
MY_CLASSYFIRE_CLASSES_COLORMAP = {
'Alkaloids and derivatives': (0.12156862745098039, 0.4666666666666667, 0.7058823529411765, 1.0),
'Benzenoids': (0.6823529411764706, 0.7803921568627451, 0.9098039215686274, 1.0),
'Lignans, neolignans and related compounds': (0.7686274509803922, 0.611764705882353, 0.5803921568627451, 1.0),
'Lipids and lipid-like molecules': (1.0, 0.4980392156862745, 0.054901960784313725, 1.0),
'Nucleosides, nucleotides, and analogues': (0.5490196078431373, 0.33725490196078434, 0.29411764705882354, 1.0),
'Organic acids and derivatives': (1.0, 0.7333333333333333, 0.47058823529411764, 1.0),
'Organic nitrogen compounds': (0.7725490196078432, 0.6901960784313725, 0.8352941176470589, 1.0),
'Organic oxygen compounds': (1.0, 0.596078431372549, 0.5882352941176471, 1.0),
'Organohalogen compounds': (0.5803921568627451, 0.403921568627451, 0.7411764705882353, 1.0),
'Organoheterocyclic compounds': (0.17254901960784313, 0.6274509803921569, 0.17254901960784313, 1.0),
'Other': (0.586082276047674, 0.586082276047674, 0.586082276047674, 1.0),
'Phenylpropanoids and polyketides': (0.8392156862745098, 0.15294117647058825, 0.1568627450980392, 1.0)
}
def plot__02__a(
results: pd.DataFrame, ks: Optional[Union[List[int], int]] = None, min_class_support: int = 50,
colormap_name: str = "fixed", sharey: str = "all", cf_level: str = "superclass", n_samples: int = 50,
topk_method: str = "csi", fig_width: int = 6, fig_height: int = 4, label_rot_angle: float = 90
):
"""
Bar plots indicating the top-k improvements per ClassyFire compound class.
:param results: pd.DataFrame, containing the Only-MS2 and LC-MS2Struct ranks, ClassyFire classes, and further
information for the spectra in our experiments. Please check, "gather_ranl_changes__csi.py" for the details on
the data-structure. The results for different MS2-scorers are concatenated.
:param ks: scalar or list of scalars, k for which the top-k ranking performance improvements should be analysed.
:param min_class_support: scalar, minimum number of unique molecular structures per ClassyFire class.
:param colormap_name: string, either the name of a matplotlib color-map, or "fixed". If "fixed" than pre-defined
colors are used for the ClassyFire super-classes.
:param sharey: string or boolean, indicating whether (and how) the y-axes ranges are synchronized.
:param cf_level: string, Classyfire level to analyse.
:param n_samples: scalar, number of random samples to estimate the top-k accuracy from.
:param topk_method: deprecated
:param fig_width: scalar, width of the figure
:param fig_height: scalar, height of the figure
:param label_rot_angle: scalar, rotation angle of the x-labels
:return:
"""
def _aggregate_and_filter_classyfire_classes(df, min_class_support, cf_level):
"""
Task: Group and aggregate the results by the ClassyFire class-level and determine the support for each class.
Then, remove all classes with too little support. Purpose is to get the "relevant" class and superclass
relationships to determine the colors and orders for the plotting.
"""
# We consider only unique molecular structures to compute the CF class support
tmp = df.drop_duplicates("correct_structure")
# Group by the ClassyFire level
tmp = tmp.groupby("classyfire_%s" % cf_level)
if cf_level == "class":
tmp = tmp.aggregate({
"molecule_identifier": lambda x: len(x),
"classyfire_superclass": lambda x: x.iloc[0]
})
elif cf_level == "superclass":
tmp = tmp.aggregate({
"molecule_identifier": lambda x: len(x),
"classyfire_class": lambda x: ",".join([xi for xi in x if not pd.isna(xi)])
})
else:
raise ValueError("Invalid ClassyFire level: '%s'" % cf_level)
tmp = tmp \
.rename({"molecule_identifier": "n_class_support"}, axis=1) \
.reset_index() \
.sort_values(by="classyfire_superclass")
return tmp[tmp["n_class_support"] >= min_class_support]
assert cf_level in ["superclass", "class"], "Invalid or unsupported ClassyFire class level: '%s'." % cf_level
assert topk_method == "csi", "Only CSI:FingerID top-k accuracy computation is supported."
# Make a deep copy of the input data, e.g. to allow modifications
results = deepcopy(results)
# Drop the rows for which the desired ClassyFire class has no value (NaN), e.g. some examples might not have a
# 'class'-level annotation.
results = results.dropna(subset=["classyfire_%s" % cf_level])
# --- Columns in the subplot ---
# k for the top-k that are plotted as columns in the subplots
if ks is None:
ks = [1, 20]
elif isinstance(ks, int):
ks = [ks]
else:
assert isinstance(ks, list)
n_k = len(ks)
# --- Rows in the subplot correspond to the MS2 scoring methods in the input data ---
if "ms2scorer" not in results.columns:
results = results.assign(ms2scorer="MS2 Scorer")
l_ms2scorer = [ms2scorer for ms2scorer, _ in results.groupby("ms2scorer")]
d_row2scorer = {s: i for i, s in enumerate(l_ms2scorer)}
n_scorer = len(l_ms2scorer)
# Create the Axes-array for plotting
fig, axrr = plt.subplots(
n_scorer, n_k, figsize=(fig_width * n_k, fig_height * n_scorer), squeeze=False, sharey=sharey, sharex="all"
)
# Get class-level colors based on superclass-level
cf_cls_stats = _aggregate_and_filter_classyfire_classes(results, min_class_support, cf_level)
LOGGER.debug(
"n_superclass = %d, n_class = %d" %
(cf_cls_stats["classyfire_superclass"].nunique(), cf_cls_stats["classyfire_class"].nunique())
)
superlevel = {}
palette = {}
order = []
if cf_level == "class":
for idx, (cf_sc, tmp) in enumerate(cf_cls_stats.groupby("classyfire_superclass")):
for cf_c in sorted(tmp["classyfire_class"].unique()):
if colormap_name == "fixed":
palette[cf_c] = MY_CLASSYFIRE_CLASSES_COLORMAP[cf_sc]
else:
palette[cf_c] = cm.get_cmap(colormap_name)(idx)
order.append(cf_c)
superlevel[cf_c] = cf_sc
elif cf_level == "superclass":
for idx, (cf_sc, _) in enumerate(cf_cls_stats.groupby("classyfire_superclass")):
if colormap_name == "fixed":
palette[cf_sc] = MY_CLASSYFIRE_CLASSES_COLORMAP[cf_sc]
else:
palette[cf_sc] = cm.get_cmap(colormap_name)(idx)
order.append(cf_sc)
else:
raise ValueError("Invalid ClassyFire level: '%s'" % cf_level)
# Plot
results_out = []
for ms2scorer, res_sub in results.groupby("ms2scorer"):
for ax_col_idx, k in enumerate(ks):
# Get the axis to draw in
ax = axrr[d_row2scorer[ms2scorer], ax_col_idx]
# Compute the top-k accuracies for Only MS and MS + RT
_res_sub = []
for rep in range(n_samples):
_res_sub.append(
res_sub
.sample(frac=1, random_state=rep)
.drop_duplicates("correct_structure")
.groupby("classyfire_%s" % cf_level)
.agg({
"rank_onlyms": lambda x: _get_topk(x, k, topk_method),
"rank_msplrt": lambda x: _get_topk(x, k, topk_method),
"n_cand": np.median,
"n_isomers": lambda x: "min=%d, max=%d, avg=%.1f, med=%.1f" % (
np.min(x), np.max(x), np.mean(x), np.median(x)
),
"molecule_identifier": len
})
.rename({
"rank_onlyms": "top_k_p_onlyms",
"rank_msplrt": "top_k_p_msplrt",
"molecule_identifier": "n_class_support"
}, axis=1)
.reset_index()
)
_res_sub = pd.concat(_res_sub, ignore_index=True)
# Add the top-k improvement in percentage-points
_res_sub = _res_sub.assign(top_k_p_improvement=(_res_sub["top_k_p_msplrt"] - _res_sub["top_k_p_onlyms"]))
# Filter classes without enough support
_res_sub = _res_sub[_res_sub["n_class_support"] >= min_class_support]
if len(_res_sub) == 0:
raise ValueError("No class has enough support.")
ax = sns.barplot(
data=_res_sub, x="classyfire_%s" % cf_level, y="top_k_p_improvement", ax=ax, palette=palette,
order=order, seed=1020
)
ax.grid(axis="y")
ax.hlines(0, ax.get_xlim()[0] - 1, ax.get_xlim()[1] + 1, color='k', linestyle="--")
ax.set_title("%s - top-%d" % (ms2scorer, k), fontweight="bold")
ax.bar_label(
ax.containers[0],
labels=[
"%.1f" % _l
for _l in _res_sub.groupby("classyfire_%s" % cf_level)["top_k_p_onlyms"].mean().tolist()
],
rotation=90, horizontalalignment="center", fmt="%.1f", label_type="edge", padding=10, fontsize=12
)
if d_row2scorer[ms2scorer] == (n_scorer - 1):
ax.set_xticklabels(
[
plt.Text(
_tl.get_position()[0], _tl.get_position()[1],
"%s (n=%d)" %
(
_tl.get_text(),
_res_sub[_res_sub["classyfire_%s" % cf_level] == _tl.get_text()]["n_class_support"].iloc[0]
)
)
for _tl in ax.get_xticklabels()
],
rotation=label_rot_angle, horizontalalignment="center", fontsize=12
)
ax.set_xlabel("ClassyFire: %s" % {"superclass": "Super-class", "class": "Class"}[cf_level], fontsize=12)
else:
ax.set_xlabel("")
if ax_col_idx == 0:
ax.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
ax.set_ylabel("")
results_out.append(
_res_sub
.groupby("classyfire_%s" % cf_level)
.agg({
"top_k_p_onlyms": np.mean,
"top_k_p_msplrt": np.mean,
"top_k_p_improvement": np.mean,
"n_cand": lambda x: x.iloc[0],
"n_isomers": lambda x: x.iloc[0],
"n_class_support": lambda x: x.iloc[0],
})
.assign(k=k, ms2scorer=ms2scorer)
.reset_index()
)
# Compute the average improvement into actual counts
results_out[-1]["improvement_in_n"] = \
(results_out[-1]["n_class_support"] * results_out[-1]["top_k_p_improvement"]) / 100
# Adjust y-axis range to provide enough space for the labels
_y_add = {1: 1.25, 5: 0.9, 20: 1.5}
for ax_col_idx, _k in enumerate(ks):
for ax in axrr[:, ax_col_idx]:
_y_min, _y_max = ax.get_ylim()
ax.set_ylim(_y_min - _y_add.get(_k, 0.0), _y_max)
plt.tight_layout()
return pd.concat(results_out, ignore_index=True), superlevel
def _get_res_set(df: pd.DataFrame):
return set((
(row["eval_indx"], row["dataset"])
for index, row in df.loc[:, ["eval_indx", "dataset"]].drop_duplicates().iterrows()
))
def _restrict_df(df: pd.DataFrame, res_set: set):
if df is None:
return None
df_out = [row for _, row in df.iterrows() if (row["eval_indx"], row["dataset"]) in res_set]
return pd.DataFrame(df_out)
def _process_dfs__01(res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020, raise_on_missing_results):
n_scorer = len(res__baseline)
res_sets = []
for i in range(n_scorer):
restrict_results = False
# Only MS2
_res_set_baseline = _get_res_set(res__baseline[i])
res_sets.append(_res_set_baseline)
# SSVM
_res = _get_res_set(res__ssvm[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("SSVM has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# RT filtering
_res = _get_res_set(res__rtfilter[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("RT filtering has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# XLogP3
_res = _get_res_set(res__xlogp3[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("XLogP3 has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# Bach et al. (2020)
_res = _get_res_set(res__bach2020[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("Bach et al. (2020) has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
if restrict_results:
res__baseline[i] = _restrict_df(res__baseline[i], res_sets[i])
res__ssvm[i] = _restrict_df(res__ssvm[i], res_sets[i])
res__rtfilter[i] = _restrict_df(res__rtfilter[i], res_sets[i])
res__xlogp3[i] = _restrict_df(res__xlogp3[i], res_sets[i])
res__bach2020[i] = _restrict_df(res__bach2020[i], res_sets[i])
# Sort results so that the rows would match
for i in range(n_scorer):
res__baseline[i] = res__baseline[i].sort_values(by=["dataset", "eval_indx", "k"])
res__ssvm[i] = res__ssvm[i].sort_values(by=["dataset", "eval_indx", "k"])
res__rtfilter[i] = res__rtfilter[i].sort_values(by=["dataset", "eval_indx", "k"])
res__xlogp3[i] = res__xlogp3[i].sort_values(by=["dataset", "eval_indx", "k"])
res__bach2020[i] = res__bach2020[i].sort_values(by=["dataset", "eval_indx", "k"])
return res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020
def plot__01__a(
res__baseline: List[pd.DataFrame],
res__ssvm: List[pd.DataFrame],
res__rtfilter: List[pd.DataFrame],
res__xlogp3: List[pd.DataFrame],
res__bach2020: List[pd.DataFrame],
aspect: str = "landscape",
max_k: int = 20,
weighted_average: bool = False,
raise_on_missing_results: bool = True,
verbose: bool = False
):
"""
Plot comparing the top-k accuracy performance for k in {1, ..., max_k} of the different scoring methods:
- baseline: Only MS2 information is used
- ssvm: Proposed Structured Support Vector Regression (SSVM) model
- rtfilter: Candidate filtering using retention time errors
- xlogp3: Candidate re-ranking using predicted XLogP3 values
- bach2020: Retention order and MS2 score integration framework by Bach et al. 2020
For each scoring method, a list of dataframes is provided. Each DataFrame has the following structure:
k top_k_method scoring_method correct_leq_k seq_length n_models eval_indx dataset top_k_acc ds lloss_mode mol_feat mol_id ms2scorer ssvm_flavor
1 csi Only-MS2 3.000000 50 8 0 AC_003 6.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
2 csi Only-MS2 5.000000 50 8 0 AC_003 10.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
3 csi Only-MS2 7.000000 50 8 0 AC_003 14.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
4 csi Only-MS2 9.000000 50 8 0 AC_003 18.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
5 csi Only-MS2 11.000000 50 8 0 AC_003 22.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
...
whereby the "scoring_method" differs.
Each list element corresponds to a different MS2 base-scorer, e.g. CFM-ID, MetFrag, ...
:param res__baseline: list of dataframe, containing the ranking results for Only MS2.
:param res__ssvm: list of dataframe, containing the ranking results for the SSVM approach.
:param res__rtfilter: list of dataframe, containing the RT filtering results.
:param res__xlogp3: list of dataframe, containing the XLogP3 re-scoring results.
:param res__bach2020: list of dataframe, containing the results achieved by Bach et al.'s method.
:param aspect: string, indicating which layout for the plot should be used:
"landscape":
CFMID METFRAG SIRIUS
____ ____ ____
| | | | ... | | Top-k
|____| |____| |____|
____ ____ ____
| | | | ... | | Top-k improvement over the baseline
|____| |____| |____|
"portrait":
Top-k Top-k improvement over the baseline
____ ____
CFMID | | | |
|____| |____|
____ ____
METFRAG | | | |
|____| |____|
.
.
.
____ ____
SIRIUS | | | |
|____| |____|
:param max_k: scalar, what is the maximum k value for the top-k curve plot.
:param weighted_average: boolean, indicating whether the top-k accuracy should first be averaged within
each dataset and subsequently averaged across the datasets. If False, then all samples are treated equally and
simply averaged directly across all datasets.
:param raise_on_missing_results: boolean, indicating whether an error should be raised if results are missing. If
False, then only those results which are available for all scoring methods of a particular MS2 base-scorer are
considered for the plots.
:param verbose: boolean, indicating whether diagnostic information should be printed, which can be helpful for
debugging.
:return: pd.DataFrame, data shown in the plot for publication.
"""
def _acc_info_printer(baseline, other, k):
print(
"\ttop-%d: baseline = %.1f%%, other = %.1f%%, improvement = %.1f%%p, gain = %.1f%%, n = %.1f" %
(
k,
baseline["top_k_acc"][other["k"] == k],
other["top_k_acc"][other["k"] == k],
(other["top_k_acc"] - baseline["top_k_acc"])[other["k"] == k],
((other["top_k_acc"] / baseline["top_k_acc"])[other["k"] == k] - 1) * 100,
(other["correct_leq_k"] - baseline["correct_leq_k"])[other["k"] == k]
)
)
assert aspect in ["landscape", "portrait"], "Invalid aspect value: '%s'" % aspect
# Number of MS2 scorers must be equal
assert len(res__baseline) == len(res__ssvm)
assert len(res__baseline) == len(res__rtfilter)
assert len(res__baseline) == len(res__xlogp3)
assert len(res__baseline) == len(res__bach2020)
n_scorer = len(res__baseline)
# There should be only one scoring and one top-k accuracy computation method in each dataframe
for k in ["scoring_method", "top_k_method", "ms2scorer"]:
for i in range(n_scorer):
assert res__baseline[i][k].nunique() == 1
assert res__ssvm[i][k].nunique() == 1
assert res__rtfilter[i][k].nunique() == 1
assert res__xlogp3[i][k].nunique() == 1
assert res__bach2020[i][k].nunique() == 1
# For the SSVM all results should be 8 SSVM models
for i in range(n_scorer):
assert np.all(res__ssvm[i]["n_models"] == 8), "There seems to be SSVM models missing."
# Get all available results and restrict them if needed by only using the result intersection
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020 = _process_dfs__01(
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020, raise_on_missing_results
)
# Get a new figure
if aspect == "portrait":
_n_rows = n_scorer
_n_cols = 2
_figsize = (9, 3 * n_scorer)
else: # landscape
_n_rows = 2
_n_cols = n_scorer
_figsize = (4.5 * n_scorer, 5.75)
fig, axrr = plt.subplots(_n_rows, _n_cols, figsize=_figsize, sharex="all", sharey=False, squeeze=False)
# Set some plot properties
k_ticks = np.arange(0, max_k + 1, 5)
k_ticks[0] = 1
# For Machine Intelligence we need to provide the raw-data for the plot
res_out = []
# Plot Top-k curve
if verbose:
print("We expect 17500 result rows")
for idx, (a, b, c, d, e) in enumerate(zip(res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020)):
assert a["ms2scorer"].unique().item() == b["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == c["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == d["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == e["ms2scorer"].unique().item()
if verbose:
print("Rows (MS2-scorer='%s'):" % a["ms2scorer"].unique().item())
print("Number of samples: %d" % (a["k"] == 1).sum())
# Get current axis and set labels
if aspect == "portrait":
# first column
ax = axrr[idx, 0]
ax.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
ax.set_ylabel("Top-k accuracy (%)", fontsize=12)
# second column
ax2 = axrr[idx, 1]
ax2.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
ax2.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
# first row
ax = axrr[0, idx]
ax.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
axrr[0, 0].set_ylabel("Top-k accuracy (%)", fontsize=12)
# second row
ax2 = axrr[1, idx]
axrr[1, 0].set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
# Baseline
if verbose:
print("Baseline: ", len(a))
if weighted_average:
bl = a[a["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
bl = a[a["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(bl["k"], bl["top_k_acc"], where="post", label=a["scoring_method"].unique().item(), color="black")
ax2.hlines(0, 1, max_k, colors="black", label=a["scoring_method"].unique().item())
res_out += list(zip(
bl["k"], bl["top_k_acc"], [a["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# SSVM
if verbose:
print("SSVM: ", len(b))
if weighted_average:
tmp = b[b["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = b[b["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=b["scoring_method"].unique().item(), color="blue")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=b["scoring_method"].unique().item(),
color="blue"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [b["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# RT filtering
if verbose:
print("RT filtering: ", len(c))
if weighted_average:
tmp = c[c["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = c[c["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=c["scoring_method"].unique().item(), color="red")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=c["scoring_method"].unique().item(),
color="red"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [c["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# XLogP3
if verbose:
print("XLogP3: ", len(d))
if weighted_average:
tmp = d[d["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = d[d["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=d["scoring_method"].unique().item(), color="green")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=d["scoring_method"].unique().item(),
color="green"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [d["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# Bach et al. (2020)
if verbose:
print("Bach et al. (2020)", len(e))
if weighted_average:
tmp = e[e["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby(
"k").mean().reset_index()
else:
tmp = e[e["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(tmp["k"], tmp["top_k_acc"], where="post", label=e["scoring_method"].unique().item(), color="orange")
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=e["scoring_method"].unique().item(),
color="orange"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [e["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# Set some further axis properties
ax.set_xticks(k_ticks)
ax2.set_xticks(k_ticks)
ax.grid(axis="y")
ax2.grid(axis="y")
if (aspect == "portrait") and (idx == (n_scorer - 1)):
ax.set_xlabel("k")
ax2.set_xlabel("k")
elif aspect == "landscape":
ax2.set_xlabel("k")
# There should be only a single legend in the figure
# TODO: Would be nice to get that one below the plots
axrr[0, 0].legend()
plt.tight_layout()
return pd.DataFrame(res_out, columns=["k", "avg_top_k_acc", "scoring_method", "ms2scorer"])
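# Usage sketch for plot__01__a (assumption: the res__* lists are loaded elsewhere,
# one DataFrame per MS2 base-scorer with the column layout documented in the
# docstring above):
#
#     >>> df_fig = plot__01__a(
#     ...     res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020,
#     ...     aspect="landscape", max_k=20, weighted_average=False
#     ... )
#     >>> plt.savefig("plot_01_a.pdf")   # the curves were drawn on the current matplotlib figure
#     >>> df_fig.head()                  # raw data behind the curves (for publication)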
def _compute_color(baseline, other, ctype):
if ctype.startswith("gain"):
cvalue = (other / baseline) - 1
if ctype.endswith("perc"):
cvalue *= 100
elif ctype == "improvement":
cvalue = other - baseline
else:
raise ValueError("Invalid ctype: '%s'." % ctype)
return cvalue
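# Worked example of the color encoding (illustrative): with a baseline top-k
# accuracy of 40% and an integrated-score accuracy of 50%:
#
#     >>> _compute_color(baseline=40.0, other=50.0, ctype="improvement")   # percentage points
#     10.0
#     >>> _compute_color(baseline=40.0, other=50.0, ctype="gain")          # relative gain
#     0.25
#     >>> _compute_color(baseline=40.0, other=50.0, ctype="gain_perc")     # relative gain in %
#     25.0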
def _reshape_output(df_d):
return [
_df.melt(ignore_index=False, value_name="top_k_acc").reset_index().assign(ms2scorer=_ms2scorer_i, k=_k)
for (_ms2scorer_i, _k), _df in df_d.items()
]
def plot__01__b(
res__baseline: List[pd.DataFrame],
res__ssvm: List[pd.DataFrame],
res__rtfilter: List[pd.DataFrame],
res__xlogp3: List[pd.DataFrame],
res__bach2020: List[pd.DataFrame],
ks: Optional[List[int]] = None,
ctype: str = "improvement",
weighted_average: bool = False,
raise_on_missing_results: bool = True,
label_format: str = ".0f",
verbose: bool = False
):
"""
Plot to illustrate the performance difference between Only MS2 and the four (4) different score integration
approaches. The input structure is the same as for "plot__01__a".
:param res__baseline: list of dataframe, containing the ranking results for Only MS2.
:param res__ssvm: list of dataframe, containing the ranking results for the SSVM approach.
:param res__rtfilter: list of dataframe, containing the RT filtering results.
:param res__xlogp3: list of dataframe, containing the XLogP3 re-scoring results.
:param res__bach2020: list of dataframe, containing the results achieved by Bach et al.'s method.
:param ks: list of scalars, top-k values to plot. By default, the variable is set to [1, 20], which means that the
top-1 and top-20 values will be plotted.
:param ctype: string, which statistic should be encoded using the color of the heatmap plot. Choices are:
"improvement": Difference between top-k (score integration) and top-k (baseline) in percentage points.
"gain": Performance gain of top-k (score integration) over top-k (baseline)
"gain_perc": Performance gain of top-k (score integration) over top-k (baseline) in percentages
:param weighted_average: boolean, indicating whether the top-k accuracy should first be averaged within
each dataset and subsequently averaged across the datasets. If False, then all samples are treated equally and
simply averaged directly across all datasets.
:param raise_on_missing_results: boolean, indicating whether an error should be raised if results are missing. If
False, then only those results which are available for all scoring methods of a particular MS2 base-scorer are
considered for the plots.
:param label_format: string, format string for the labels. Default: Rounded to full number.
:param verbose: boolean, indicating whether diagnostic information should be printed, which can be helpful for
debugging.
:return: pd.DataFrame, data shown in the plot for publication.
"""
assert ctype in ["improvement", "gain", "gain_perc"], "Invalid ctype value: '%s'" % ctype
ctype_labels = {
"improvement": "Top-k acc. improvement (%p)",
"gain": "Performance gain",
"gain_perc": "Performance gain (%)"
}
# Total number of scoring methods in our manuscript
n_methods = 5
# Number of MS2 scorers must be equal
assert len(res__baseline) == len(res__ssvm)
assert len(res__baseline) == len(res__rtfilter)
assert len(res__baseline) == len(res__xlogp3)
assert len(res__baseline) == len(res__bach2020)
n_scorer = len(res__baseline)
# There should be only one scoring and one top-k accuracy computation method in each dataframe
for k in ["scoring_method", "top_k_method", "ms2scorer"]:
for i in range(n_scorer):
assert res__baseline[i][k].nunique() == 1
assert res__ssvm[i][k].nunique() == 1
assert res__rtfilter[i][k].nunique() == 1
assert res__xlogp3[i][k].nunique() == 1
assert res__bach2020[i][k].nunique() == 1
# For the SSVM all results should be 8 SSVM models
for i in range(n_scorer):
assert np.all(res__ssvm[i]["n_models"] == 8), "There seems to be SSVM models missing."
# Get all available results and restrict them if needed by only using the result intersection
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020 = _process_dfs__01(
res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020, raise_on_missing_results
)
# Get number of datasets
datasets = [res__baseline[i]["dataset"].unique().tolist() for i in range(n_scorer)]
# Get a new figure
fig, axrr = plt.subplots(n_scorer, len(ks), figsize=(20, 5 * n_scorer), sharex=False, sharey="row", squeeze=False)
# Plot Top-k curve
if verbose:
print("We expect 17500 result rows")
# For Machine Intelligence we need to write out the content of the figure
_label_df = {}
_color_df = {}
# Do the plotting ...
for i, _res in enumerate(zip(res__baseline, res__ssvm, res__rtfilter, res__xlogp3, res__bach2020)):
_ms2scorer_i = _res[0]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[1]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[2]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[3]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[4]["ms2scorer"].unique().item()
if verbose:
print("Rows (MS2-scorer='%s'):" % _ms2scorer_i)
print("Number of samples: %d" % (_res[0]["k"] == 1).sum())
# Top-k accuracy matrices: (1) label matrix and (2) color encoding matrix
lab_val_mat = np.full((len(ks), n_methods, len(datasets[i]) + 1), fill_value=np.nan)
col_val_mat = np.full((len(ks), n_methods, len(datasets[i]) + 1), fill_value=np.nan)
# shape = (
# number_of_ks_to_plot,
# number_of_score_integration_methods,
# number_of_datasets_plus_avg
# )
lab_val_d = {}
for j, k in enumerate(ks):
# Get current axis
ax = axrr[i, j]
# i: Each MS2 scorer is plotted into its own row
# j: Each top-k is plotted into its own column
for l, ds in enumerate(datasets[i]):
# Top-k accuracy as label
for m in range(n_methods):
# Get the top-k values for the current dataset (= MassBank group) and the current value of k
# (top-k). This might be several values, depending on the number of evaluation samples in each
# dataset.
_top_k_values = _res[m][(_res[m]["dataset"] == ds) & (_res[m]["k"] == k)]["top_k_acc"].values
# As label, we use the average performance (WITHIN DATASET).
lab_val_mat[j, m, l] = np.mean(_top_k_values)
if not weighted_average:
lab_val_d[(j, m, l)] = _top_k_values
# Performance gain or improvement as color
for m in range(n_methods):
# Note: The first score integration method is Only MS2 (= baseline)
col_val_mat[j, m, l] = _compute_color(
baseline=lab_val_mat[j, 0, l], other=lab_val_mat[j, m, l], ctype=ctype
)
# Compute average performance (ACROSS THE DATASETS)
if weighted_average:
lab_val_mat[j, :, -1] = np.mean(lab_val_mat[j, :, :-1], axis=1)
else:
for m in range(n_methods):
lab_val_mat[j, m, -1] = np.mean(
np.concatenate(
[lab_val_d[(j, m, l)] for l in range(len(datasets[i]))]
)
)
for m in range(n_methods):
col_val_mat[j, m, -1] = _compute_color(
baseline=lab_val_mat[j, 0, -1], other=lab_val_mat[j, m, -1], ctype=ctype
)
# Wrap the matrices into dataframes
_index = pd.Index(
data=[_res[m]["scoring_method"].unique().item() for m in range(n_methods)],
name="scoring_method"
)
_columns = pd.Index(data=datasets[i] + ["AVG."], name="dataset")
_label_df[(_ms2scorer_i, k)] = pd.DataFrame(lab_val_mat[j], index=_index, columns=_columns)
_color_df[(_ms2scorer_i, k)] = pd.DataFrame(col_val_mat[j], index=_index, columns=_columns)
# Plot the heatmap
sns.heatmap(
data=_color_df[(_ms2scorer_i, k)],
# -- Label design --
annot=_label_df[(_ms2scorer_i, k)],
fmt=label_format,
annot_kws={"fontweight": "normal"},
# -- Color design --
center=0,
cmap="PiYG",
cbar_kws={
"location": "bottom",
"orientation": "horizontal",
"shrink": 0.5,
"label": ctype_labels[ctype]
},
linewidths=0.75,
ax=ax,
square=True
)
# Visually separate the baseline and average cells from the rest
ax.hlines(1, 0, col_val_mat.shape[2], color="black", linewidth=0.75)
ax.vlines(len(datasets[i]), 0, n_methods, color="black", linewidth=0.75)
ax.set_title(
"%s - top-%d accuracy" % (_res[0]["ms2scorer"].unique().item(), k), fontsize=14, fontweight="bold",
pad=16
)
if i == (n_scorer - 1):
ax.set_xlabel("Dataset (MassBank group)")
else:
ax.set_xlabel("")
if j == 0:
ax.set_ylabel("Scoring method")
else:
ax.set_ylabel("")
plt.tight_layout()
# Reshape output for the figure reproduction and return
return pd.concat(_reshape_output(_label_df), ignore_index=True), \
pd.concat(_reshape_output(_color_df), ignore_index=True)
def _process_dfs__03(res__baseline, res__ssvm__2D, res__ssvm__3D, raise_on_missing_results):
n_scorer = len(res__baseline)
res_sets = []
for i in range(n_scorer):
restrict_results = False
# Only MS2
_res_set_baseline = _get_res_set(res__baseline[i])
res_sets.append(_res_set_baseline)
# SSVM (2D fingerprints)
_res = _get_res_set(res__ssvm__2D[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("SSVM has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
# SSVM (3D fingerprints)
_res = _get_res_set(res__ssvm__3D[i])
if _res != _res_set_baseline:
if raise_on_missing_results:
raise ValueError("RT filtering has missing results!")
else:
res_sets[-1] &= _res
restrict_results = True
if restrict_results:
res__baseline[i] = _restrict_df(res__baseline[i], res_sets[i])
res__ssvm__2D[i] = _restrict_df(res__ssvm__2D[i], res_sets[i])
res__ssvm__3D[i] = _restrict_df(res__ssvm__3D[i], res_sets[i])
# Sort results so that the rows would match
for i in range(n_scorer):
res__baseline[i] = res__baseline[i].sort_values(by=["dataset", "eval_indx", "k"])
res__ssvm__2D[i] = res__ssvm__2D[i].sort_values(by=["dataset", "eval_indx", "k"])
res__ssvm__3D[i] = res__ssvm__3D[i].sort_values(by=["dataset", "eval_indx", "k"])
return res__baseline, res__ssvm__2D, res__ssvm__3D
def plot__03__a(
res__baseline: List[pd.DataFrame],
res__ssvm__2D: List[pd.DataFrame],
res__ssvm__3D: List[pd.DataFrame],
aspect: str = "landscape",
max_k: int = 20,
weighted_average: bool = False,
raise_on_missing_results: bool = True,
verbose: bool = False
):
"""
Plot comparing the top-k accuracy performance for k in {1, ..., max_k} of the different scoring methods:
- baseline: Only MS2 information is used
- ssvm__2D: Proposed Structured Support Vector Regression (SSVM) model with 2D fingerprints
- ssvm__3D: Proposed Structured Support Vector Regression (SSVM) model with 3D fingerprints
For each scoring method, a list of dataframes is provided. Each DataFrame has the following structure:
k top_k_method scoring_method correct_leq_k seq_length n_models eval_indx dataset top_k_acc ds lloss_mode mol_feat mol_id ms2scorer ssvm_flavor
1 csi Only-MS2 3.000000 50 8 0 AC_003 6.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
2 csi Only-MS2 5.000000 50 8 0 AC_003 10.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
3 csi Only-MS2 7.000000 50 8 0 AC_003 14.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
4 csi Only-MS2 9.000000 50 8 0 AC_003 18.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
5 csi Only-MS2 11.000000 50 8 0 AC_003 22.000000 AC_003 mol_feat_fps FCFP__binary__all__2D cid CFM-ID (v4) default
...
whereby the "scoring_method" differs.
Each list element corresponds to a different MS2 base-scorer, e.g. CFM-ID, MetFrag, ...
:param res__baseline: list of dataframe, containing the ranking results for Only MS2.
:param res__ssvm__2D: list of dataframe, containing the ranking results for the SSVM approach with 2D fingerprints.
:param res__ssvm__3D: list of dataframe, containing the ranking results for the SSVM approach with 3D fingerprints.
:param aspect: string, indicating which layout for the plot should be used:
"landscape":
CFMID METFRAG SIRIUS
____ ____ ____
| | | | ... | | Top-k
|____| |____| |____|
____ ____ ____
| | | | ... | | Top-k improvement over the baseline
|____| |____| |____|
"portrait":
Top-k Top-k improvement over the baseline
____ ____
CFMID | | | |
|____| |____|
____ ____
METFRAG | | | |
|____| |____|
.
.
.
____ ____
SIRIUS | | | |
|____| |____|
:param max_k: scalar, what is the maximum k value for the top-k curve plot.
:param weighted_average: boolean, indicating whether the top-k accuracy should first be averaged within
each dataset and subsequently averaged across the datasets. If False, then all samples are treated equally and
simply averaged directly across all datasets.
:param raise_on_missing_results: boolean, indicating whether an error should be raised if results are missing. If
False, then only those results which are available for all scoring methods of a particular MS2 base-scorer are
considered for the plots.
:param verbose: boolean, indicating whether diagnostic information should be printed, which can be helpful for
debugging.
:return: pd.DataFrame, data shown in the plot for publication.
"""
def _acc_info_printer(baseline, other, k):
print(
"\ttop-%d: baseline = %.1f%%, other = %.1f%%, improvement = %.1f%%p, gain = %.1f%%, n = %.1f" %
(
k,
baseline["top_k_acc"][other["k"] == k],
other["top_k_acc"][other["k"] == k],
(other["top_k_acc"] - baseline["top_k_acc"])[other["k"] == k],
((other["top_k_acc"] / baseline["top_k_acc"])[other["k"] == k] - 1) * 100,
(other["correct_leq_k"] - baseline["correct_leq_k"])[other["k"] == k]
)
)
assert aspect in ["landscape", "portrait"], "Invalid aspect value: '%s'" % aspect
# Number of MS2 scorers must be equal
assert len(res__baseline) == len(res__ssvm__2D)
assert len(res__baseline) == len(res__ssvm__3D)
n_scorer = len(res__baseline)
# There should be only one scoring and one top-k accuracy computation method in each dataframe
for k in ["scoring_method", "top_k_method", "ms2scorer"]:
for i in range(n_scorer):
assert res__baseline[i][k].nunique() == 1
assert res__ssvm__2D[i][k].nunique() == 1
assert res__ssvm__3D[i][k].nunique() == 1
# For the SSVM all results should be 8 SSVM models
for i in range(n_scorer):
assert np.all(res__ssvm__2D[i]["n_models"] == 8), "2D: There seems to be SSVM models missing."
assert np.all(res__ssvm__3D[i]["n_models"] == 8), "3D: There seems to be SSVM models missing."
# Get all available results and restrict them if needed by only using the result intersection
res__baseline, res__ssvm__2D, res__ssvm__3D, = _process_dfs__03(
res__baseline, res__ssvm__2D, res__ssvm__3D, raise_on_missing_results
)
# Get a new figure
if aspect == "portrait":
_n_rows = n_scorer
_n_cols = 2
_figsize = (9, 3 * n_scorer)
else: # landscape
_n_rows = 2
_n_cols = n_scorer
_figsize = (4.5 * n_scorer, 5.75)
fig, axrr = plt.subplots(_n_rows, _n_cols, figsize=_figsize, sharex="all", sharey=False, squeeze=False)
# Set some plot properties
k_ticks = np.arange(0, max_k + 1, 5)
k_ticks[0] = 1
# For Machine Intelligence we need to provide the raw-data for the plot
res_out = []
# Plot Top-k curve
if verbose:
print("We expect 4700 result rows")
for idx, (a, b, c) in enumerate(zip(res__baseline, res__ssvm__2D, res__ssvm__3D)):
assert a["ms2scorer"].unique().item() == b["ms2scorer"].unique().item()
assert a["ms2scorer"].unique().item() == c["ms2scorer"].unique().item()
if verbose:
print("Rows (MS2-scorer='%s'):" % a["ms2scorer"].unique().item())
print("Number of samples: %d" % (a["k"] == 1).sum())
# Get current axis and set labels
if aspect == "portrait":
# first column
ax = axrr[idx, 0]
ax.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
ax.set_ylabel("Top-k accuracy (%)", fontsize=12)
# second column
ax2 = axrr[idx, 1]
ax2.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
ax2.set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
else:
# first row
ax = axrr[0, idx]
ax.set_title(a["ms2scorer"].unique().item(), fontweight="bold")
axrr[0, 0].set_ylabel("Top-k accuracy (%)", fontsize=12)
# second row
ax2 = axrr[1, idx]
axrr[1, 0].set_ylabel("Top-k accuracy\nimprovement (%p)", fontsize=12)
# Baseline
if verbose:
print("Baseline: ", len(a))
if weighted_average:
bl = a[a["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
bl = a[a["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(bl["k"], bl["top_k_acc"], where="post", label=a["scoring_method"].unique().item(), color="black")
ax2.hlines(0, 1, max_k, colors="black", label=a["scoring_method"].unique().item())
res_out += list(zip(
bl["k"], bl["top_k_acc"], [a["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# SSVM (2D)
if verbose:
print("SSVM (2D): ", len(b))
if weighted_average:
tmp = b[b["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = b[b["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(
tmp["k"], tmp["top_k_acc"], where="post", label=b["scoring_method"].unique().item(), color="blue",
linestyle="dashed"
)
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=b["scoring_method"].unique().item(),
color="blue", linestyle="dashed"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [b["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# SSVM (3D)
if verbose:
print("SSVM (3D): ", len(c))
if weighted_average:
tmp = c[c["k"] <= max_k].groupby(["dataset", "k"]).mean().reset_index().groupby("k").mean().reset_index()
else:
tmp = c[c["k"] <= max_k].groupby("k").mean().reset_index()
ax.step(
tmp["k"], tmp["top_k_acc"], where="post", label=c["scoring_method"].unique().item(), color="blue",
linestyle="dotted"
)
assert np.all(tmp["k"] == bl["k"])
ax2.step(
tmp["k"], tmp["top_k_acc"] - bl["top_k_acc"], where="post", label=c["scoring_method"].unique().item(),
color="blue", linestyle="dotted"
)
if verbose:
for _k in [1, 20]:
_acc_info_printer(bl, tmp, _k)
res_out += list(zip(
tmp["k"], tmp["top_k_acc"], [c["scoring_method"].unique().item()] * max_k,
[a["ms2scorer"].unique().item()] * max_k
))
# ---
# Set some further axis properties
ax.set_xticks(k_ticks)
ax2.set_xticks(k_ticks)
ax.grid(axis="y")
ax2.grid(axis="y")
if (aspect == "portrait") and (idx == (n_scorer - 1)):
ax.set_xlabel("k")
ax2.set_xlabel("k")
elif aspect == "landscape":
ax2.set_xlabel("k")
# There should be only a single legend in the figure
# TODO: Would be nice to get that one below the plots
axrr[0, 0].legend()
plt.tight_layout()
return pd.DataFrame(res_out, columns=["k", "avg_top_k_acc", "scoring_method", "ms2scorer"])
def plot__03__b(
res__baseline: List[pd.DataFrame],
res__ssvm__2D: List[pd.DataFrame],
res__ssvm__3D: List[pd.DataFrame],
ks: Optional[List[int]] = None,
ctype: str = "improvement",
weighted_average: bool = False,
raise_on_missing_results: bool = True,
label_format: str = ".0f",
verbose: bool = False
):
"""
Plot to illustrate the performance difference between Only MS2 and the two SSVM variants (2D and 3D
fingerprints). The input structure is the same as for "plot__03__a".
:param res__baseline: list of dataframe, containing the ranking results for Only MS2.
:param res__ssvm__2D: list of dataframe, containing the ranking results for the SSVM approach with 2D fingerprints.
:param res__ssvm__3D: list of dataframe, containing the ranking results for the SSVM approach with 3D fingerprints.
:param ks: list of scalars, top-k values to plot. By default, the variable is set to [1, 20], which means that the
top-1 and top-20 values will be plotted.
:param ctype: string, which statistic should be encoded using the color of the heatmap plot. Choices are:
"improvement": Difference between top-k (score integration) and top-k (baseline) in percentage points.
"gain": Performance gain of top-k (score integration) over top-k (baseline)
"gain_perc": Performance gain of top-k (score integration) over top-k (baseline) in percentages
:param weighted_average: boolean, indicating whether the top-k accuracy should first be averaged within
each dataset and subsequently averaged across the datasets. If False, then all samples are treated equally and
simply averaged directly across all datasets.
:param raise_on_missing_results: boolean, indicating whether an error should be raised if results are missing. If
False, then only those results which are available for all scoring methods of a particular MS2 base-scorer are
considered for the plots.
:param label_format: string, format string for the labels. Default: Rounded to full number.
:param verbose: boolean, indicating whether diagnostic information should be printed, which can be helpful for
debugging.
:return: pd.DataFrame, data shown in the plot for publication.
"""
assert ctype in ["improvement", "gain", "gain_perc"], "Invalid ctype value: '%s'" % ctype
ctype_labels = {
"improvement": "Top-k acc. improvement (%p)",
"gain": "Performance gain",
"gain_perc": "Performance gain (%)"
}
# Total number of scoring methods in our manuscript
n_methods = 3
# Number of MS2 scorers must be equal
assert len(res__baseline) == len(res__ssvm__2D)
assert len(res__baseline) == len(res__ssvm__3D)
n_scorer = len(res__baseline)
# There should be only one scoring and one top-k accuracy computation method in each dataframe
for k in ["scoring_method", "top_k_method", "ms2scorer"]:
for i in range(n_scorer):
assert res__baseline[i][k].nunique() == 1
assert res__ssvm__2D[i][k].nunique() == 1
assert res__ssvm__3D[i][k].nunique() == 1
# For the SSVM all results should be 8 SSVM models
for i in range(n_scorer):
assert np.all(res__ssvm__2D[i]["n_models"] == 8), "2D: There seems to be SSVM models missing."
assert np.all(res__ssvm__3D[i]["n_models"] == 8), "3D: There seems to be SSVM models missing."
# Get all available results and restrict them if needed by only using the result intersection
res__baseline, res__ssvm__2D, res__ssvm__3D, = _process_dfs__03(
res__baseline, res__ssvm__2D, res__ssvm__3D, raise_on_missing_results
)
# Get number of datasets
datasets = [res__baseline[i]["dataset"].unique().tolist() for i in range(n_scorer)]
# Get a new figure
fig, axrr = plt.subplots(n_scorer, len(ks), figsize=(20, 5 * n_scorer), sharex=False, sharey="row", squeeze=False)
# Plot Top-k curve
if verbose:
print("We expect 4700 result rows")
# For Machine Intelligence we need to write out the content of the figure
_label_df = {}
_color_df = {}
# Do the plotting ...
for i, _res in enumerate(zip(res__baseline, res__ssvm__2D, res__ssvm__3D)):
_ms2scorer_i = _res[0]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[1]["ms2scorer"].unique().item()
assert _ms2scorer_i == _res[2]["ms2scorer"].unique().item()
if verbose:
print("Rows (MS2-scorer='%s'):" % _ms2scorer_i)
print("Number of samples: %d" % (_res[0]["k"] == 1).sum())
# Top-k accuracy matrices: (1) label matrix and (2) color encoding matrix
lab_val_mat = np.full((len(ks), n_methods, len(datasets[i]) + 1), fill_value=np.nan)
col_val_mat = np.full((len(ks), n_methods, len(datasets[i]) + 1), fill_value=np.nan)
# shape = (
# number_of_ks_to_plot,
# number_of_score_integration_methods,
# number_of_datasets_plus_avg
# )
lab_val_d = {}
for j, k in enumerate(ks):
# Get current axis
ax = axrr[i, j]
# i: Each MS2 scorer is plotted into its own row
# j: Each top-k is plotted into its own column
for l, ds in enumerate(datasets[i]):
# Top-k accuracy as label
for m in range(n_methods):
# Get the top-k values for the current dataset (= MassBank group) and the current value of k
# (top-k). This might be several values, depending on the number of evaluation samples in each
# dataset.
_top_k_values = _res[m][(_res[m]["dataset"] == ds) & (_res[m]["k"] == k)]["top_k_acc"].values
# As label, we use the average performance (WITHIN DATASET).
lab_val_mat[j, m, l] = np.mean(_top_k_values)
if not weighted_average:
lab_val_d[(j, m, l)] = _top_k_values
# Performance gain or improvement as color
for m in range(n_methods):
# Note: The first score integration method is Only MS2 (= baseline)
col_val_mat[j, m, l] = _compute_color(
baseline=lab_val_mat[j, 0, l], other=lab_val_mat[j, m, l], ctype=ctype
)
# Compute average performance (ACROSS THE DATASETS)
if weighted_average:
lab_val_mat[j, :, -1] = np.mean(lab_val_mat[j, :, :-1], axis=1)
else:
for m in range(n_methods):
lab_val_mat[j, m, -1] = np.mean(
np.concatenate(
[lab_val_d[(j, m, l)] for l in range(len(datasets[i]))]
)
)
for m in range(n_methods):
col_val_mat[j, m, -1] = _compute_color(
baseline=lab_val_mat[j, 0, -1], other=lab_val_mat[j, m, -1], ctype=ctype
)
# Wrap the matrices into dataframes
_index = pd.Index(
data=[_res[m]["scoring_method"].unique().item() for m in range(n_methods)],
name="scoring_method"
)
_columns = pd.Index(data=datasets[i] + ["AVG."], name="dataset")
_label_df[(_ms2scorer_i, k)] = | pd.DataFrame(lab_val_mat[j], index=_index, columns=_columns) | pandas.DataFrame |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from GridCal.Engine.basic_structures import BusMode
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
from GridCal.Engine.Devices.groupings import Area, Substation, Zone, Country
class Bus(EditableDevice):
"""
The Bus object is the container of all the possible devices that can be attached to
a bus bar or substation. Such objects can be loads, voltage controlled generators,
static generators, batteries, shunt elements, etc.
Arguments:
**name** (str, "Bus"): Name of the bus
**vnom** (float, 10.0): Nominal voltage in kV
**vmin** (float, 0.9): Minimum per unit voltage
**vmax** (float, 1.1): Maximum per unit voltage
**r_fault** (float, 0.0): Resistance of the fault in per unit (SC only)
**x_fault** (float, 0.0): Reactance of the fault in per unit (SC only)
**xpos** (int, 0): X position in pixels (GUI only)
**ypos** (int, 0): Y position in pixels (GUI only)
**height** (int, 0): Height of the graphic object (GUI only)
**width** (int, 0): Width of the graphic object (GUI only)
**active** (bool, True): Is the bus active?
**is_slack** (bool, False): Is this bus a slack bus?
**area** (str, "Default"): Name of the area
**zone** (str, "Default"): Name of the zone
**substation** (str, "Default"): Name of the substation
Additional Properties:
**Qmin_sum** (float, 0): Minimum reactive power of this bus (inferred from the devices)
**Qmax_sum** (float, 0): Maximum reactive power of this bus (inferred from the devices)
**loads** (list, list()): List of loads attached to this bus
**controlled_generators** (list, list()): List of controlled generators attached to this bus
**shunts** (list, list()): List of shunts attached to this bus
**batteries** (list, list()): List of batteries attached to this bus
**static_generators** (list, list()): List of static generators attached to this bus
**measurements** (list, list()): List of measurements
"""
def __init__(self, name="Bus", idtag=None, code='', vnom=10, vmin=0.9, vmax=1.1, r_fault=0.0, x_fault=0.0,
xpos=0, ypos=0, height=0, width=0, active=True, is_slack=False, is_dc=False,
area=None, zone=None, substation=None, country=None, longitude=0.0, latitude=0.0):
EditableDevice.__init__(self,
name=name,
idtag=idtag,
active=active,
code=code,
device_type=DeviceType.BusDevice,
editable_headers={'name': GCProp('', str, 'Name of the bus'),
'idtag': GCProp('', str, 'Unique ID'),
'code': GCProp('', str, 'Some code to further identify the bus'),
'active': GCProp('', bool,
'Is the bus active? used to disable the bus.'),
'is_slack': GCProp('', bool, 'Force the bus to be of slack type.'),
'is_dc': GCProp('', bool, 'Is this bus of DC type?.'),
'Vnom': GCProp('kV', float,
'Nominal line voltage of the bus.'),
'Vmin': GCProp('p.u.', float,
'Lower range of allowed voltage.'),
'Vmax': GCProp('p.u.', float,
'Higher range of allowed voltage.'),
'r_fault': GCProp('p.u.', float,
'Resistance of the fault.\n'
'This is used for short circuit studies.'),
'x_fault': GCProp('p.u.', float, 'Reactance of the fault.\n'
'This is used for short circuit studies.'),
'x': GCProp('px', float, 'x position in pixels.'),
'y': GCProp('px', float, 'y position in pixels.'),
'h': GCProp('px', float, 'height of the bus in pixels.'),
'w': GCProp('px', float, 'Width of the bus in pixels.'),
'country': GCProp('', DeviceType.CountryDevice, 'Country of the bus'),
'area': GCProp('', DeviceType.AreaDevice, 'Area of the bus'),
'zone': GCProp('', DeviceType.ZoneDevice, 'Zone of the bus'),
'substation': GCProp('', DeviceType.SubstationDevice, 'Substation of the bus.'),
'longitude': GCProp('deg', float, 'longitude of the bus.'),
'latitude': GCProp('deg', float, 'latitude of the bus.')},
non_editable_attributes=['idtag'],
properties_with_profile={'active': 'active_prof'})
# Nominal voltage (kV)
self.Vnom = vnom
# minimum voltage limit
self.Vmin = vmin
# maximum voltage limit
self.Vmax = vmax
# summation of lower reactive power limits connected
self.Qmin_sum = 0
# summation of upper reactive power limits connected
self.Qmax_sum = 0
# short circuit impedance
self.r_fault = r_fault
self.x_fault = x_fault
# is the bus active?
self.active = active
self.active_prof = None
self.country = country
self.area = area
self.zone = zone
self.substation = substation
# List of loads attached to this bus
self.loads = list()
# List of Controlled generators attached to this bus
self.controlled_generators = list()
# List of shunts attached to this bus
self.shunts = list()
# List of batteries attached to this bus
self.batteries = list()
# List of static generators attached to this bus
self.static_generators = list()
# List of External grid devices
self.external_grids = list()
# List of measurements
self.measurements = list()
# Bus type
self.type = BusMode.PQ
# Flag to determine if the bus is a slack bus or not
self.is_slack = is_slack
# determined if this bus is an AC or DC bus
self.is_dc = is_dc
# if true, the presence of storage devices turns the bus into a reference bus in practice,
# so that P + jQ are computed
self.dispatch_storage = False
# position and dimensions
self.x = xpos
self.y = ypos
self.h = height
self.w = width
self.longitude = longitude
self.latitude = latitude
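# Usage sketch (illustrative, not part of the original file): a plain 20 kV
# bus with +/-5 % voltage limits.
#
#     >>> bus = Bus(name="Bus 1", vnom=20.0, vmin=0.95, vmax=1.05)
#     >>> bus.Vnom
#     20.0
#     >>> bus.type == BusMode.PQ   # buses start as PQ until attached devices say otherwise
#     True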
def delete_children(self):
"""
Delete all the children
"""
self.batteries.clear()
self.shunts.clear()
self.static_generators.clear()
self.loads.clear()
self.controlled_generators.clear()
def add_device(self, device):
"""
Add device to the bus in the corresponding list
:param device:
:return:
"""
if device.device_type == DeviceType.BatteryDevice:
self.batteries.append(device)
elif device.device_type == DeviceType.ShuntDevice:
self.shunts.append(device)
elif device.device_type == DeviceType.StaticGeneratorDevice:
self.static_generators.append(device)
elif device.device_type == DeviceType.LoadDevice:
self.loads.append(device)
elif device.device_type == DeviceType.GeneratorDevice:
self.controlled_generators.append(device)
elif device.device_type == DeviceType.ExternalGridDevice:
self.external_grids.append(device)
else:
raise Exception('Device type not understood:' + str(device.device_type))
def determine_bus_type(self):
"""
Infer the bus type from the devices attached to it
@return: self.type
"""
if self.is_slack:
# if it is set as slack, set the bus as slack and exit
self.type = BusMode.Slack
return self.type
elif len(self.external_grids) > 0: # there are devices setting this as a slack bus
# count the number of active external grids
ext_on = 0
for elm in self.external_grids:
if elm.active:
ext_on += 1
# if there are any active external grids, set as slack and exit
if ext_on > 0:
self.type = BusMode.Slack
return self.type
# if we got here, determine what to do...
# count the active and controlled generators
gen_on = 0
for elm in self.controlled_generators:
if elm.active and elm.is_controlled:
gen_on += 1
# count the active and controlled batteries
batt_on = 0
for elm in self.batteries:
if elm.active and elm.is_controlled:
batt_on += 1
shunt_on = 0
for elm in self.shunts:
if elm.active and elm.is_controlled:
shunt_on += 1
if (gen_on + batt_on + shunt_on) > 0:
self.type = BusMode.PV
else:
# Nothing special; set it as PQ
self.type = BusMode.PQ
return self.type
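# Illustrative example of the classification logic above (a sketch, not from the
# original source):
#
#     >>> bus = Bus(name="B1", vnom=20.0)
#     >>> bus.determine_bus_type() == BusMode.PQ      # nothing controllable attached
#     True
#     >>> bus.is_slack = True
#     >>> bus.determine_bus_type() == BusMode.Slack   # an explicit slack flag wins
#     True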
def determine_bus_type_at(self, t):
"""
Infer the bus type from the devices attached to it
:param t: time index
@return: self.type
"""
if self.is_slack:
# if it is set as slack, set the bus as slack and exit
return BusMode.Slack
elif len(self.external_grids) > 0: # there are devices setting this as a slack bus
# count the number of active external grids
ext_on = 0
for elm in self.external_grids:
if elm.active_prof[t]:
ext_on += 1
# if there are any active external grids, set as slack and exit
if ext_on > 0:
return BusMode.Slack
else:
# if we got here, determine what to do...
# count the active and controlled generators
gen_on = 0
for elm in self.controlled_generators:
if elm.active_prof[t] and elm.is_controlled:
gen_on += 1
# count the active and controlled batteries
batt_on = 0
for elm in self.batteries:
if elm.active_prof[t] and elm.is_controlled:
batt_on += 1
shunt_on = 0
for elm in self.shunts:
if elm.active_prof[t] and elm.is_controlled:
shunt_on += 1
if (gen_on + batt_on + shunt_on) > 0:
return BusMode.PV
else:
# Nothing special; set it as PQ
return BusMode.PQ
return BusMode.PQ
def determine_bus_type_prof(self):
"""
Array of bus types according to the profile
:return: array of bus type numbers
"""
if self.active_prof is not None:
nt = self.active_prof.shape[0]
values = np.zeros(nt, dtype=int)
for t in range(nt):
values[t] = self.determine_bus_type_at(t).value
return values
else:
raise Exception('Asked the profile types with no profile!')
def get_reactive_power_limits(self):
"""
get the summation of reactive power
@return: Qmin, Qmax
"""
Qmin = 0.0
Qmax = 0.0
# count the active and controlled generators
for elm in self.controlled_generators + self.batteries:
if elm.active:
if elm.is_controlled:
Qmin += elm.Qmin
Qmax += elm.Qmax
for elm in self.shunts:
if elm.active:
if elm.is_controlled:
Qmin += elm.Bmin
Qmax += elm.Bmax
return Qmin, Qmax
def initialize_lp_profiles(self):
"""
Dimension the LP var profiles
:return: Nothing
"""
for elm in (self.controlled_generators + self.batteries):
elm.initialize_lp_vars()
def plot_profiles(self, time_profile, ax_load=None, ax_voltage=None, time_series_driver=None, my_index=0):
"""
plot the profiles of this bus
:param time_profile: Master profile of time steps (stored in the MultiCircuit)
:param time_series_driver: time series driver
:param ax_load: Load axis, if not provided one will be created
:param ax_voltage: Voltage axis, if not provided one will be created
:param my_index: index of this object in the time series results
"""
if ax_load is None:
fig = plt.figure(figsize=(12, 8))
fig.suptitle(self.name, fontsize=20)
if time_series_driver is not None:
# 2 plots: load + voltage
ax_load = fig.add_subplot(211)
ax_voltage = fig.add_subplot(212, sharex=ax_load)
else:
# only 1 plot: load
ax_load = fig.add_subplot(111)
ax_voltage = None
show_fig = True
else:
show_fig = False
if time_series_driver is not None:
v = np.abs(time_series_driver.results.voltage[:, my_index])
p = np.abs(time_series_driver.results.S[:, my_index])
t = time_series_driver.results.time
pd.DataFrame(data=v, index=t, columns=['Voltage (p.u.)']).plot(ax=ax_voltage)
| pd.DataFrame(data=p, index=t, columns=['Computed power (p.u.)']) | pandas.DataFrame |
from os.path import join
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from src import utils as cutil
def convert_non_monotonic_to_nan(array):
"""Converts a numpy array to a monotonically increasing one.
Args:
array (numpy.ndarray [N,]): input array
Returns:
numpy.ndarray [N,]: some values marked as missing, all non-missing
values should be monotonically increasing
Usage:
>>> convert_non_monotonic_to_nan(np.array([0, 0, 5, 3, 4, 6, 3, 7, 6, 7, 8]))
np.array([ 0., 0., np.nan, 3., np.nan, np.nan, 3., np.nan, 6., 7., 8.])
"""
keep = np.arange(0, len(array))
is_monotonic = False
while not is_monotonic:
is_monotonic_array = np.hstack(
(array[keep][1:] >= array[keep][:-1], np.array(True))
)
is_monotonic = is_monotonic_array.all()
keep = keep[is_monotonic_array]
out_array = np.full_like(array.astype(float), np.nan)
out_array[keep] = array[keep]
return out_array
def log_interpolate(array):
"""Interpolates assuming log growth.
Args:
array (numpy.ndarray [N,]): input array with missing values
Returns:
numpy.ndarray [N,]: all missing values will be filled
Usage:
>>> log_interpolate(np.array([0, np.nan, 2, np.nan, 4, 6, np.nan, 7, 8]))
np.array([0, 0, 2, 3, 4, 6, 7, 7, 8])
"""
idx = np.arange(0, len(array))
log_array = np.log(array.astype(np.float32) + 1e-1)
interp_array = np.interp(
x=idx, xp=idx[~np.isnan(array)], fp=log_array[~np.isnan(array)]
)
return np.round(np.exp(interp_array)).astype(int)
DATA_CHINA = cutil.DATA_RAW / "china"
health_dxy_file = join(DATA_CHINA, "DXYArea.csv")
health_jan_file = join(DATA_CHINA, "china_city_health_jan.xlsx")
policy_file = join(DATA_CHINA, "CHN_policy_data_sources.csv")
pop_file = join(DATA_CHINA, "china_city_pop.csv")
output_file = cutil.DATA_PROCESSED / "adm2" / "CHN_processed.csv"
match_file = join(DATA_CHINA, "match_china_city_name_w_adm2.csv")
shp_file = cutil.DATA_INTERIM / "adm" / "adm2" / "adm2.shp"
end_date_file = cutil.CODE / "data" / "cutoff_dates.csv"
end_date = pd.read_csv(end_date_file)
(end_date,) = end_date.loc[end_date["tag"] == "default", "end_date"].values
end_date = str(end_date)
print("End Date: ", end_date)
## Load and clean pre 01/24 data
# load pre 01/24 data
df_jan = pd.read_excel(health_jan_file, sheet_name=None)
# process pre 1/24 data
df_jan_merged = pd.DataFrame(columns=["adm0_name", "adm1_name", "adm2_name", "date"])
for old_col, new_col in zip(
["confirmed", "death", "recovery"],
["cum_confirmed_cases", "cum_deaths", "cum_recoveries"],
):
melted = (
df_jan[old_col]
.melt(
id_vars=["adm0_name", "adm1_name", "adm2_name"],
var_name="date",
value_name=new_col,
)
.dropna()
)
df_jan_merged = pd.merge(
df_jan_merged,
melted,
how="outer",
on=["adm0_name", "adm1_name", "adm2_name", "date"],
)
df_jan_merged = df_jan_merged.loc[df_jan_merged["adm2_name"] != "Unknown", :]
## Load and clean main data (scraped), harmonize city names
# data downloaded from
# https://github.com/BlankerL/DXY-COVID-19-Data
df = pd.read_csv(health_dxy_file)
# drop aggregates and cases in other countries
df = df.loc[df["countryEnglishName"] == "China", :]
df = df.loc[df["cityName"].notna(), :]
# df.describe(include='all') # quick summary
# df['provinceName'].unique() # looks clean
# df['provinceEnglishName'].unique() # looks clean
# df['cityName'].unique() # looks messy, will keep raw data
# # check unique English name for obs with the same Chinese cityName
# for cn_name, group in df.groupby(['provinceName', 'cityName']):
# en_name = group['cityEnglishName'].unique()
# if len(en_name) > 1:
# print(cn_name)
# print(en_name)
# print(group['cityEnglishName'].shape)
# print(group['cityEnglishName'].value_counts())
# # check all english city names
# for en_name, _ in df.groupby(['provinceEnglishName', 'cityEnglishName']):
# print(en_name)
# # check all chinese city names
# for cn_name, _ in df.groupby(['provinceName', 'cityName']):
# print(cn_name)
# set and sort index
df = df.set_index(["provinceName", "cityName"]).sort_index()
# record notes
df.loc[:, "notes"] = np.nan
# recode city English names based on Chinese names
cityEnglishName_dict = {
# 'provinceName', 'cityName': 'cityEnglishName', 'assignedToCity'
# for prisons
("浙江省", "省十里丰监狱"): ("Shilifeng Prison", "prison"),
("山东省", "任城监狱"): ("Rencheng Prison", "prison"),
("湖北省", "监狱系统"): ("Prison", "prison"),
# for harmonizing names
("四川省", "凉山"): ("Liangshan Yi Autonomous Prefecture", np.nan),
("四川省", "凉山州"): ("Liangshan Yi Autonomous Prefecture", np.nan),
# for imported cases
(None, "境外输入人员"): ("International Imported Cases", "imported"),
(None, "外地来沪人员"): ("Domestic Imported Cases", "imported"),
(None, "武汉来京人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来京人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来津"): ("Domestic Imported Cases", "imported"),
(None, "外地来津人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来穗人员"): ("Domestic Imported Cases", "imported"),
(None, "外地来粤人员"): ("Domestic Imported Cases", "imported"),
# for unknown
(None, "待明确地区"): ("Unknown", "unknown"),
(None, "未明确地区"): ("Unknown", "unknown"),
(None, "未知"): ("Unknown", "unknown"),
(None, "未知地区"): ("Unknown", "unknown"),
(None, "不明地区"): ("Unknown", "unknown"),
(None, "未明确地区"): ("Unknown", "unknown"),
(None, "待明确"): ("Unknown", "unknown"),
}
# clean up cityEnglishName
for cn_name, values in cityEnglishName_dict.items():
cn_name = tuple(slice(s) if s is None else s for s in cn_name)
df.loc[cn_name, ["cityEnglishName", "notes"]] = values
# # check remaining missing values
# df.loc[df['cityEnglishName'].isna(), :].index.unique().tolist()
# add new admin level
df.loc[:, "adm3_name"] = "N/A"
# recode city English names based on Chinese names
cityEnglishName_dict = {
("上海市", "金山"): "Jinshan District",
("云南省", "红河"): "Honghe",
("云南省", "西双版纳州"): "Xishuangbanna",
("内蒙古自治区", "赤峰市松山区"): ("Chifeng", "Songshan"),
("内蒙古自治区", "赤峰市林西县"): ("Chifeng", "Linxi"),
("内蒙古自治区", "通辽市经济开发区"): "Tongliao",
("内蒙古自治区", "鄂尔多斯东胜区"): ("Ordos", "Dongsheng"),
("内蒙古自治区", "鄂尔多斯鄂托克前旗"): ("Ordos", "Etuokeqianqi"),
("内蒙古自治区", "锡林郭勒"): "Xilingol League",
("内蒙古自治区", "锡林郭勒盟"): "Xilingol League",
("内蒙古自治区", "锡林郭勒盟二连浩特"): ("Xilingol League", "Erlianhaote"),
("内蒙古自治区", "锡林郭勒盟锡林浩特"): ("Xilingol League", "Xilinhaote"),
("北京市", "石景山"): "Shijingshan District",
("北京市", "西城"): "Xicheng District",
("北京市", "通州"): "Tongzhou District",
("北京市", "门头沟"): "Mentougou District",
("北京市", "顺义"): "Shunyi District",
(
"新疆维吾尔自治区",
"石河子",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
("新疆维吾尔自治区", "第七师"): "Xinjiang Production and Construction Corps 7th Division",
("新疆维吾尔自治区", "第九师"): "Xinjiang Production and Construction Corps 9th Division",
(
"新疆维吾尔自治区",
"第八师",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
(
"新疆维吾尔自治区",
"第八师石河子",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
(
"新疆维吾尔自治区",
"第八师石河子市",
): "Shihezi, Xinjiang Production and Construction Corps 8th Division",
("新疆维吾尔自治区", "第六师"): "Xinjiang Production and Construction Corps 6th Division",
("新疆维吾尔自治区", "胡杨河"): (
"Xinjiang Production and Construction Corps 7th Division",
"Huyanghe",
),
("新疆维吾尔自治区", "阿克苏"): "Akesu",
("河北省", "邯郸市"): "Handan",
("河南省", "邓州"): "Zhengzhou",
("河南省", "长垣"): "Changyuan",
("河南省", "长垣县"): "Changyuan",
("河南省", "鹤壁市"): "Hebi",
("海南省", "陵水县"): "Lingshui Li Autonomous County",
("甘肃省", "白银市"): "Baiyin",
("甘肃省", "金昌市"): "Jinchang",
("重庆市", "石柱"): "Shizhu Tujia Autonomous County",
("重庆市", "秀山"): "Xiushan Tujia and Miao Autonomous County",
("重庆市", "酉阳"): "Youyang Tujia and Miao Autonomous County",
("青海省", "西宁市"): "Xining",
# this is not missing but a typo in the original dataset
("河南省", "邓州"): "Dengzhou",
("江苏省", "淮安"): "Huai'an",
}
# clean up cityEnglishName
for cn_name, values in cityEnglishName_dict.items():
if isinstance(values, str):
df.loc[cn_name, "cityEnglishName"] = values
elif len(values) == 2:
df.loc[cn_name, ["cityEnglishName", "adm3_name"]] = values
# rename variables
df.rename(
{
"provinceEnglishName": "adm1_name",
"cityEnglishName": "adm2_name",
"city_confirmedCount": "cum_confirmed_cases",
"city_deadCount": "cum_deaths",
"city_curedCount": "cum_recoveries",
},
axis=1,
inplace=True,
)
# extract dates
df.loc[:, "updateTime"] = pd.to_datetime(df["updateTime"])
df.loc[:, "date"] = df["updateTime"].dt.date
df.loc[:, "date"] = pd.to_datetime(df["date"])
# choose the latest observation in each day
df = df.sort_values(by=["updateTime"])
df = df.drop_duplicates(
subset=["adm1_name", "adm2_name", "adm3_name", "date"], keep="last"
)
# subset columns
df = df.loc[
:,
[
"adm1_name",
"adm2_name",
"adm3_name",
"date",
"notes",
"cum_confirmed_cases",
"cum_deaths",
"cum_recoveries",
],
]
# for big cities, adjust adm level
mask = df["adm1_name"].isin(["Shanghai", "Beijing", "Tianjin", "Chongqing"])
df.loc[mask, "adm3_name"] = df.loc[mask, "adm2_name"].tolist()
df.loc[mask, "adm2_name"] = df.loc[mask, "adm1_name"].tolist()
# drop cases unassigned to cities
df = df.loc[df["notes"] != "prison", :]
df = df.loc[
~df["adm2_name"].isin(
["International Imported Cases", "Domestic Imported Cases", "Unknown"]
),
:,
]
# aggregate to city level
df = (
df.groupby(["adm1_name", "adm2_name", "date"])
.agg(
cum_confirmed_cases=pd.NamedAgg(
column="cum_confirmed_cases", aggfunc=np.nansum
),
cum_deaths=pd.NamedAgg(column="cum_deaths", aggfunc=np.nansum),
cum_recoveries=pd.NamedAgg(column="cum_recoveries", aggfunc=np.nansum),
)
.reset_index()
)
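# Note on the pattern above: pd.NamedAgg(column=..., aggfunc=np.nansum) sums the
# district-level rows (e.g. the big-city districts moved to adm3 above) into one
# city-level row per (adm1_name, adm2_name, date). Minimal sketch of the idiom
# (toy data, not from this dataset):
#
#     >>> toy = pd.DataFrame({"city": ["A", "A", "B"], "cases": [1, 2, 5]})
#     >>> toy.groupby("city").agg(total=pd.NamedAgg(column="cases", aggfunc=np.nansum))
#     # -> one row per city: A -> 3, B -> 5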
# fill adm0_name variable
df.loc[:, "adm0_name"] = "CHN"
## Merge with pre 01/24 data, create balanced panel
# merge with pre 1/24 data
df = | pd.concat([df, df_jan_merged], sort=False) | pandas.concat |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from empiricaldist import Pmf
from scipy.stats import gaussian_kde
from scipy.stats import binom
from scipy.stats import gamma
from scipy.stats import poisson
def values(series):
"""Make a series of values and the number of times they appear.
Returns a DataFrame because they get rendered better in Jupyter.
series: Pandas Series
returns: Pandas DataFrame
"""
series = series.value_counts(dropna=False).sort_index()
series.index.name = 'values'
series.name = 'counts'
return pd.DataFrame(series)
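# Usage sketch (illustrative):
#
#     >>> values(pd.Series([1, 2, 2, 3]))
#     # -> a DataFrame with one 'counts' column indexed by value: 1 -> 1, 2 -> 2, 3 -> 1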
def write_table(table, label, **options):
"""Write a table in LaTex format.
table: DataFrame
label: string
options: passed to DataFrame.to_latex
"""
filename = f'tables/{label}.tex'
fp = open(filename, 'w')
s = table.to_latex(**options)
fp.write(s)
fp.close()
def write_pmf(pmf, label):
"""Write a Pmf object as a table.
pmf: Pmf
label: string
"""
df = pd.DataFrame()
df['qs'] = pmf.index
df['ps'] = pmf.values
write_table(df, label, index=False)
def underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
d: dictionary
options: keyword args to add to d
"""
for key, val in options.items():
d.setdefault(key, val)
return d
def decorate(**options):
"""Decorate the current axes.
Call decorate with keyword arguments like
decorate(title='Title',
xlabel='x',
ylabel='y')
The keyword arguments can be any of the axis properties
https://matplotlib.org/api/axes_api.html
"""
ax = plt.gca()
ax.set(**options)
handles, labels = ax.get_legend_handles_labels()
if handles:
ax.legend(handles, labels)
plt.tight_layout()
def savefig(root, **options):
"""Save the current figure.
root: string filename root
options: passed to plt.savefig
"""
format = options.pop('format', None)
if format:
formats = [format]
else:
formats = ['pdf', 'png']
for format in formats:
fname = f'figs/{root}.{format}'
plt.savefig(fname, **options)
def make_die(sides):
"""Pmf that represents a die with the given number of sides.
sides: int
returns: Pmf
"""
outcomes = np.arange(1, sides+1)
die = Pmf(1/sides, outcomes)
return die
def add_dist_seq(seq):
"""Distribution of sum of quantities from PMFs.
seq: sequence of Pmf objects
returns: Pmf
"""
total = seq[0]
for other in seq[1:]:
total = total.add_dist(other)
return total
def make_mixture(pmf, pmf_seq):
"""Make a mixture of distributions.
pmf: mapping from each hypothesis to its probability
(or it can be a sequence of probabilities)
pmf_seq: sequence of Pmfs, each representing
a conditional distribution for one hypothesis
returns: Pmf representing the mixture
"""
df = pd.DataFrame(pmf_seq).fillna(0).transpose()
df *= np.array(pmf)
total = df.sum(axis=1)
return Pmf(total)
def summarize(posterior, digits=3, prob=0.9):
"""Print the mean and CI of a distribution.
posterior: Pmf
digits: number of digits to round to
prob: probability in the CI
"""
    mean = np.round(posterior.mean(), digits)
    ci = posterior.credible_interval(prob)
    print(mean, ci)
def outer_product(s1, s2):
"""Compute the outer product of two Series.
First Series goes down the rows;
second goes across the columns.
s1: Series
s2: Series
return: DataFrame
"""
a = np.multiply.outer(s1.to_numpy(), s2.to_numpy())
return pd.DataFrame(a, index=s1.index, columns=s2.index)
def make_uniform(qs, name=None, **options):
"""Make a Pmf that represents a uniform distribution.
qs: quantities
name: string name for the quantities
options: passed to Pmf
returns: Pmf
"""
pmf = Pmf(1.0, qs, **options)
pmf.normalize()
if name:
pmf.index.name = name
return pmf
def make_joint(s1, s2):
"""Compute the outer product of two Series.
First Series goes across the columns;
second goes down the rows.
s1: Series
s2: Series
return: DataFrame
"""
X, Y = np.meshgrid(s1, s2)
return | pd.DataFrame(X*Y, columns=s1.index, index=s2.index) | pandas.DataFrame |
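# A short usage sketch relying on the Pmf helpers defined above (empiricaldist must be
# installed); the three-dice example is invented for illustration.
die = make_die(6)                              # uniform Pmf over 1..6
three_dice = add_dist_seq([die, die, die])     # distribution of the sum of three dice
print(three_dice.mean())                       # expected value is 10.5
summarize(three_dice)                          # prints mean and 90% credible interval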
import os
import datetime as dt
import glob
import fnmatch
import struct
import pandas as pd
from tensorboardX.proto import event_pb2
def get_event_files(directory, recursive=True):
""" Return the full path to all files in directory matching the specified
pattern.
Arguments:
directory (str): Directory path in which to look
recursive (bool): Searches recursively if True
Returns:
A list of matching file paths
"""
pattern = "events.*"
matches = list()
if recursive is False:
it = glob.iglob(os.path.join(directory, pattern))
for filename in it:
matches.append(filename)
return matches
# If we want to recurse, use os.walk instead
for root, dirnames, filenames in os.walk(directory):
extend = [os.path.join(root, ss) for ss in
fnmatch.filter(filenames, pattern)]
matches.extend(extend)
return matches
class EventReader(object):
def __init__(self, filename=None, mode="rb"):
self.filename = filename
self.mode = mode
self._fh = None
self._buf = None
self._index = 0
def __enter__(self):
self._fh = open(self.filename, self.mode)
self._buf = self._fh.read()
self._index = 0
return self
def _read_event(self):
# Read the header which tells the length of the event string
header_str = self._buf[self._index: self._index + 8]
header = struct.unpack('Q', header_str)
self._index += 12
# Read the event string
header_len = int(header[0])
event_str = self._buf[self._index: self._index + header_len]
self._index += (header_len + 4)
# Parse event string
ev = event_pb2.Event()
try:
ev.ParseFromString(event_str)
except:
raise
return ev
def read(self):
events = []
while self._index < len(self._buf):
event = self._read_event()
if event is not None:
events.append(event)
return events
def __exit__(self, *args):
self._fh.close()
def get_summary_events(event_file):
with EventReader(event_file, "rb") as fh:
events = fh.read()
for event in events:
if event.HasField("summary"):
yield event
def get_valid_event_files(event_files):
valid = []
for ef in event_files:
try:
it = get_summary_events(ef)
ev = next(iter(it))
except:
continue
valid.append(ef)
return valid
def get_scalar_dataframe(event_file, maxlen=200, load=True, store=True):
df_fname = os.path.join(os.path.dirname(event_file), "extracted.csv")
if load and os.path.isfile(df_fname):
df = pd.read_csv(df_fname)
return df
values = dict()
for event in get_summary_events(event_file):
for v in event.summary.value:
if v.HasField("simple_value"):
tag = v.tag
values.setdefault(tag, list()).append((dt.datetime.fromtimestamp(event.wall_time),
event.step, v.simple_value))
df = None
for nm, value in values.items():
if len(value) > maxlen:
continue
if df is None:
df = | pd.DataFrame(value, columns=["wall_time", "step", nm]) | pandas.DataFrame |
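# A hypothetical usage sketch of the reader above; the run directory is made up, and it
# assumes the truncated get_scalar_dataframe goes on to join the per-tag frames and
# return the result.
event_files = get_event_files("runs/experiment_01", recursive=True)
for ef in get_valid_event_files(event_files):
    scalars = get_scalar_dataframe(ef, load=False, store=False)
    if scalars is not None:
        print(ef, list(scalars.columns))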
import os
import re
from datetime import datetime
from multiprocessing.pool import ThreadPool
import pandas as pd
import requests
from bs4 import BeautifulSoup
from .config_file import ConfigFile
from .file_handler import FileHandler
class HttpRetrieval(ConfigFile):
URL = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod' \
'/hrrr.{}/conus/'
FILE_PATTERN = r'hrrr\.t\d\dz\.wrfsfcf{}.grib2'
NUMBER_REQUESTS = 2
REQUEST_TIMEOUT = 600
def __init__(self, overwrite=False, config=None, external_logger=None):
super().__init__(
__name__, config=config, external_logger=external_logger
)
if self._config is not None and 'output' in self._config:
if 'num_requests' in self._config['output'].keys():
self._number_requests = int(
self._config['output']['num_requests']
)
if 'request_timeout' in self._config['output'].keys():
self._request_timeout = int(
self._config['output']['request_timeout'])
self.overwrite = overwrite
self.date_folder = True
self.forecast_hour = None
@property
def number_requests(self):
return getattr(self, '_number_requests', HttpRetrieval.NUMBER_REQUESTS)
@property
def request_timeout(self):
return getattr(self, '_request_timeout', HttpRetrieval.REQUEST_TIMEOUT)
@property
def forecast_str(self):
"""Turn a list of forecast hours to strings with 2 digits. For
example [0, 1] becomes ['00', '01']
Returns:
list: None or list of strings
"""
if self.forecast_hour is None:
return None
else:
return [str(x).zfill(2) for x in self.forecast_hour]
@property
def regex_file_name(self):
"""Create the regex string to match the file names
Returns:
str: file name pattern to match
"""
if self.forecast_str is None:
return self.FILE_PATTERN.format('\\d\\d')
else:
return self.FILE_PATTERN.format('({})'.format('|'.join(self.forecast_str)))
@property
def url_date(self):
return HttpRetrieval.URL.format(
self.start_date.strftime(FileHandler.SINGLE_DAY_FORMAT)
)
def output_folder(self):
if self.date_folder:
self.folder_date = FileHandler.folder_name(self.start_date)
out_path = os.path.join(self.output_dir, self.folder_date)
if not os.path.isdir(out_path):
os.mkdir(out_path)
self.log.info('mkdir {}'.format(out_path))
else:
out_path = self.output_dir
self.out_path = out_path
def fetch_by_date(self, start_date, end_date, forecast_hour=None):
"""Fetch data from NOMADS between a date range for a given forecast hour.
The default will download all forecast hours.
Args:
start_date (str): start date string
end_date (str): end date string
forecast_hour (list, optional): list of forecast hours. Defaults to None.
Returns:
list: list of responses for data downloads
"""
self.log.info('Retrieving data from the http site')
self.start_date = start_date
self.end_date = end_date
self.forecast_hour = forecast_hour
self.check_dates()
self.output_folder()
df = self.parse_html_for_files()
if len(df) == 0:
self.log.warning('No files found that match request')
return None
self.log.debug('Generating requests')
pool = ThreadPool(processes=self.number_requests)
        self.log.info('Sending {} requests'.format(len(df)))
        # pool.map converts the iterable to a list right away and waits
        # for the requests to finish before continuing
res = pool.map(self.fetch_from_url, df.url.to_list())
self.log.info(
'{} -- Done with downloads'.format(datetime.now().isoformat()))
return res
def parse_html_for_files(self):
"""Parse the url from NOMADS with BeautifulSoup and look for matching
filenames.
Returns:
pd.DataFrame: data frame of files that match the pattern
"""
# get the html text
self.log.debug('Requesting html text from {}'.format(self.url_date))
page = requests.get(self.url_date).text
soup = BeautifulSoup(page, 'html.parser')
# parse
columns = ['modified', 'file_date', 'file_name', 'out_file', 'new_file', 'url', 'size']
df = pd.DataFrame(columns=columns)
regex = re.compile(self.regex_file_name)
for node in soup.find_all('a'):
if node.get('href').endswith('grib2'):
file_name = node.get('href')
result = regex.match(file_name)
if result:
# matched a file name so get more information about it
file_url = self.url_date + file_name
data = node.next_element.next_element.strip()
el = data.split(' ')
modified = pd.to_datetime(
el[0] + ' ' + el[1]).tz_localize(tz='UTC')
size = el[3]
out_file = os.path.join(self.out_path, file_name)
df = df.append({
'modified': modified,
'file_date': FileHandler.folder_to_date(self.folder_date, file_name),
'file_name': file_name,
'out_file': out_file,
'new_file': not os.path.exists(out_file),
'url': file_url,
'size': size
}, ignore_index=True)
self.log.debug('Found {} matching files'.format(len(df)))
if len(df) == 0:
return df
if not self.overwrite:
df = df[df.new_file]
self.log.debug(
'{} files do not exist in output directory'.format(len(df)))
# parse by the date
idx = (df['file_date'] >= self.start_date) & \
(df['file_date'] <= self.end_date)
df = df.loc[idx]
self.log.debug(
'Found {} files between start and end date'.format(len(df)))
return df
def fetch_from_url(self, uri):
"""
Fetch the file at the uri and save the file to the out_path
Args:
uri: url of the file
Returns:
False if failed or path to saved file
"""
success = False
try:
self.log.debug('Fetching {}'.format(uri))
r = requests.get(uri, timeout=self.request_timeout)
if r.status_code == 200:
f = r.url.split('/')[-1]
out_file = os.path.join(self.out_path, f)
with open(out_file, 'wb') as f:
f.write(r.content)
f.close()
self.log.debug('Saved to {}'.format(out_file))
success = out_file
except Exception as e:
self.log.warning('Problem processing response')
self.log.warning(e)
return success
def check_dates(self):
# if self.start_date is not None:
self.start_date = | pd.to_datetime(self.start_date) | pandas.to_datetime |
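# A rough sketch of driving the class above; the dates are invented, output_dir is
# normally supplied by the ConfigFile base class (set by hand here only for
# illustration), and the truncated check_dates is assumed to finish converting both dates.
retriever = HttpRetrieval(overwrite=False)
retriever.output_dir = '/tmp/hrrr'   # assumption: usually read from the config file
responses = retriever.fetch_by_date('2021-06-01 00:00', '2021-06-01 06:00',
                                    forecast_hour=[0, 1])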
##############################################################################
#
# An example of creating a chart with Pandas and XlsxWriter.
#
# Copyright 2013, <NAME>, <EMAIL>
#
import random
import pandas as pd
from vincent.colors import brews
# Some sample data to plot.
cat_4 = ['Metric_' + str(x) for x in range(1, 9)]
index_4 = ['Data 1', 'Data 2', 'Data 3', 'Data 4']
data_3 = {}
for cat in cat_4:
data_3[cat] = [random.randint(10, 100) for x in index_4]
# Create a Pandas dataframe from the data.
df = pd.DataFrame(data_3, index=index_4)
# Create a Pandas Excel writer using XlsxWriter as the engine.
excel_file = 'stacked_column.xlsx'
sheet_name = 'Sheet1'
writer = | pd.ExcelWriter(excel_file, engine='xlsxwriter') | pandas.ExcelWriter |
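# The sample stops at creating the writer; a possible continuation, following the
# standard pandas + XlsxWriter charting workflow, might look like this.
df.to_excel(writer, sheet_name=sheet_name)

workbook = writer.book
worksheet = writer.sheets[sheet_name]
chart = workbook.add_chart({'type': 'column', 'subtype': 'stacked'})

# One series per metric column; data starts at row 1 because row 0 holds the header.
for col_num in range(1, len(cat_4) + 1):
    chart.add_series({
        'name':       [sheet_name, 0, col_num],
        'categories': [sheet_name, 1, 0, len(index_4), 0],
        'values':     [sheet_name, 1, col_num, len(index_4), col_num],
    })

worksheet.insert_chart('K2', chart)
writer.save()   # writer.close() in newer pandas versions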
"""
TODO:
- Rewrite Paris
- Random Forest filtering paper
- iForest
- OCSVM
- Fix bugs
    - If the threshold is so low that all pixels would be rejected, send a warning and return a minimum dataset
- Add oversampling
"""
import pandas as pd
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import (
StratifiedKFold,
StratifiedShuffleSplit
)
from imblearn.under_sampling.base import BaseCleaningSampler
from copy import deepcopy
from collections import Counter
class MBKMeansFilter(BaseCleaningSampler):
"""My own method"""
def __init__(self, n_splits=5, granularity=5, method='obs_percent', threshold=0.5, random_state=None):
assert method in ['obs_percent', 'mislabel_rate'], 'method must be either \'obs_percent\', \'mislabel_rate\''
super().__init__(sampling_strategy='all')
self.n_splits = n_splits
self.granularity = granularity
self.method = method
self.threshold = threshold
self.random_state = random_state
def _fit_resample(self, X, y, filters):
#assert X.shape[0]==y.shape[0], 'X and y must have the same length.'
## cluster data
#print('n_splits:', self.n_splits, ', granularity:', self.granularity, ', method:', self.method, ', threshold:', self.threshold, ', random_state:', self.random_state)
self.filters = deepcopy(filters)
index = np.arange(len(y))
clusters_list = []
index_list = []
self.kmeans = {}
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters, kmeans = self._KMeans_clustering(X_label)
self.kmeans[analysis_label] = kmeans
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
self.stratifiedkfold = StratifiedKFold(n_splits = self.n_splits, shuffle=True, random_state=self.random_state)
self.filter_list = {}
filter_outputs = {}
for n, (_, split) in enumerate(self.stratifiedkfold.split(X, y_)):
for name, clf in self.filters:
classifier = deepcopy(clf)
classifier.fit(X[split], y_[split])
filter_outputs[f'filter_{n}_{name}'] = classifier.predict(X)
self.filter_list[f'{n}_{name}'] = classifier
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, pd.DataFrame(filter_outputs).values)\
.astype(int).sum(axis=1)
)/total_filters
## crunch data
mislabel_col = pd.Series(data=mislabel_rate, index=index, name='mislabel_rate')
y_col = pd.Series(data=y, index=index, name='y')
df = cluster_col.to_frame().join(y_col).join(mislabel_col)
df['count'] = 1
df_cluster_info_grouped = df.groupby(['y', 'cluster'])\
.agg({'mislabel_rate':np.mean, 'count':'count'})\
.sort_values(['mislabel_rate'])
df_cluster_info_A = df_cluster_info_grouped.groupby(['y']).cumsum()\
.rename(columns={'count':'cumsum'}).drop(columns=['mislabel_rate'])
df_cluster_info = df_cluster_info_grouped.join(df_cluster_info_A)
if self.method=='mislabel_rate':
df_cluster_info['status'] = df_cluster_info['mislabel_rate']<=self.threshold
elif self.method=='obs_percent':
thresholds = df_cluster_info.groupby('y').max()['cumsum']*self.threshold
actual_thresholds = df_cluster_info[
df_cluster_info['cumsum']/thresholds>=1
]['cumsum'].groupby('y').min()
df_cluster_info['status'] = df_cluster_info['cumsum']/actual_thresholds<=1
# always accept cluster with lowest mislabel rate for each class by default
index_keys = df_cluster_info.reset_index().groupby('y').apply(
lambda x: x.sort_values('mislabel_rate').iloc[0]
)[['y','cluster']].values
df_cluster_info.loc[[tuple(i) for i in index_keys], 'status'] = True
results = df.join(df_cluster_info['status'], on=['y','cluster'])
self.status = results['status'].values
return X[self.status], y[self.status]
def fit(self, X, y, filters):
"""Fits filter to X, y."""
self._fit_resample(X, y, filters)
return self
def resample(self, X, y):
index = np.arange(len(y))
clusters_list = []
index_list = []
for analysis_label in np.unique(y):
label_indices = index[y==analysis_label]
X_label = X[y==analysis_label]
clusters = self.kmeans[analysis_label].predict(X_label)
index_list.append(label_indices)
clusters_list.append(clusters)
## cluster labels
cluster_col = pd.Series(
data=np.concatenate(clusters_list),
index=np.concatenate(index_list),
name='cluster')\
.sort_index()
## apply filters
label_encoder = LabelEncoder()
y_ = label_encoder.fit_transform(y)
filter_outputs = {}
for name, classifier in self.filter_list.items():
filter_outputs[f'filter_{name}'] = classifier.predict(X)
## mislabel rate
total_filters = len(filter_outputs.keys())
mislabel_rate = (total_filters - \
np.apply_along_axis(
lambda x: x==y_, 0, | pd.DataFrame(filter_outputs) | pandas.DataFrame |
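# A hypothetical usage sketch of the filter above on synthetic data; it assumes the
# _KMeans_clustering helper referenced in _fit_resample is defined elsewhere in the class.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
filters = [('lr', LogisticRegression(max_iter=1000)),
           ('dt', DecisionTreeClassifier(random_state=0))]

flt = MBKMeansFilter(n_splits=3, granularity=5, threshold=0.5, random_state=0)
flt.fit(X, y, filters)                        # runs the filter ensemble and clustering
X_clean, y_clean = X[flt.status], y[flt.status]
print(X.shape, '->', X_clean.shape)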
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series( | date_range("20130101", periods=5) | pandas.date_range |
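# For reference, a tiny standalone illustration of the duplicate-index alignment
# behaviour exercised by test_arithmetic_with_duplicate_index above (values invented).
import numpy as np
import pandas as pd

ser = pd.Series(np.arange(1, 6, dtype="int64"), index=[2, 2, 3, 3, 4])
other = pd.Series(np.arange(5, dtype="int64"), index=[2, 2, 3, 3, 4])
print(ser - other)   # aligned label-by-label on the identical duplicated index -> all ones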
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 12:46:41 2018
@author: MichaelEK
"""
import numpy as np
from os import path
import pandas as pd
from pdsql.mssql import rd_sql
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime
import matplotlib.ticker as plticker
import lowflows as lf
plt.ioff()
loc = plticker.MaxNLocator(integer=True)
datetime1 = pd.Timestamp.today()
#date1 = pd.Timestamp(datetime1.date())
pd.options.display.max_columns = 10
#####################################
### Parameters
usm_server = 'sql02prod'
usm_database = 'usm'
site_table = 'Site'
site_attr_table = 'SiteAttribute'
bad_sites = {'66101': '14240260', '65104': '165104', '69650': '696501'}
irr_mons1 = [10, 11, 12]
irr_mons2 = [1, 2, 3, 4]
include_flow_methods = ['Correlated from Telem', 'Gauged', 'Telemetered', 'Visually Gauged', 'GW manual']
##color palettes
full_color = sns.color_palette('Blues')
partial_color = sns.color_palette('Greens')
no_color = sns.color_palette('Greys')
base_dir = path.split(path.realpath(path.dirname(__file__)))[0]
export_path = path.join(base_dir, 'lowflow_plots')
#export_sel2 = 'lowflow_restr_2017-10-01.csv'
####################################
### Set up time ranges
mon_now = datetime1.month - 1
year_now = datetime1.year
if mon_now in irr_mons1:
from_date = '{year}-10-01'.format(year=year_now)
elif mon_now in irr_mons2:
from_date = '{year}-10-01'.format(year=year_now - 1)
else:
from_date = '{year}-05-01'.format(year=year_now)
end_mon_now = datetime1 - | pd.DateOffset(months=1) | pandas.DateOffset |
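# A quick standalone check of the irrigation-season start-date logic above for a few
# hypothetical run dates (month lists copied from the script).
import pandas as pd

irr_mons1 = [10, 11, 12]
irr_mons2 = [1, 2, 3, 4]
for today in [pd.Timestamp('2018-11-15'), pd.Timestamp('2018-02-20'), pd.Timestamp('2018-06-10')]:
    mon = today.month - 1
    if mon in irr_mons1:
        start = '{year}-10-01'.format(year=today.year)
    elif mon in irr_mons2:
        start = '{year}-10-01'.format(year=today.year - 1)
    else:
        start = '{year}-05-01'.format(year=today.year)
    print(today.date(), '->', start)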
""" Ontology builder v2 - explicit definition! Reads a csv file with these columns: Full Label, Label, Code, Type, [Ancestor_Code, Ancestor_Type]* and outputs an i2b2 ontology file. Filenames are hardcoded right now, should be runtime parameters (TODO!).
This is the non-recursive ontology builder which started as a tool to just convert CSV tables into the less intuitive ontology table format.
But then I added stuff specifically for flowsheet value sets and value types, so it's gotten a bit bloated.
BUGS:
* Path doesn't work
TODO:
* Value sets in questionaires
* Modifiers for comment fields (presence or absence)
By <NAME>, PhD 10/2017
"""
import numpy as np
import pandas as pd
from ontology_tools import metadataxml as mdx
path_in = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/ont_dev/flowsheet_data.csv'
path_in_vs = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/ont_dev/flowsheet_row_customlist.csv'
path_in_type = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/ont_dev/flowsheet_row_value_type.csv'
path_in_notes = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/ont_dev/notestypes.csv'
path_out = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/ont_dev/flowsheet_ont_i2b2.csv'
# Thanks BrycePG on StackOverflow: https://github.com/pandas-dev/pandas/issues/4588
def concat_fixed(ndframe_seq, **kwargs):
"""Like pd.concat but fixes the ordering problem.
Converts Series objects to DataFrames to access join method
Use kwargs to pass through to repeated join method
"""
indframe_seq = iter(ndframe_seq)
# Use the first ndframe object as the base for the final
final_df = pd.DataFrame(next(indframe_seq))
for dataframe in indframe_seq:
if isinstance(dataframe, pd.Series):
dataframe = pd.DataFrame(dataframe)
# Iteratively build final table
final_df = final_df.join(dataframe, **kwargs)
return final_df
def doNonrecursive(df):
cols=df.columns[2:-5][::-1] # -5 is because we added a bunch of columns not part of fullname, 2 is for the labels
oddeven=1
for col in cols:
df.fullname = df.fullname.str.cat(df[col],na_rep='',sep=(':' if oddeven==0 else '\\'))
oddeven = 0 if oddeven==1 else 1
#if (cols.tolist().index(col)==len(cols)-2): df.path = df.fullname # Save the path when we're near the end
#if (cols.index(col) == len(cols) - 1):
return df
""" Input a df with columns (minimally): Full Label, Label, Code, Type, [Ancestor_Code, Ancestor_Type]*
Will add additional columns: tooltip, h_level, fullname
This is no longer recursive!
"""
def OntProcess(df):
df['fullname']=''
df['tooltip']=''
df['path']=''
df['h_level']=np.nan
df=doNonrecursive(df)
df['fullname']=df['fullname'].map(lambda x: x.lstrip(':\\')).map(lambda x: x.rstrip(':\\'))
df['fullname']='\\0\\'+df['fullname'].map(str)+"\\"
df=df.append({'fullname':'\\0\\'},ignore_index=True) # Add root node
df['h_level']=df['fullname'].str.count('\\\\')-2
return df
""" Input a df with (minimally): Full Label, Label, Code, Type, [Ancestor_Code, Ancestor_Type]*
Outputs an i2b2 ontology compatible df.
"""
def OntBuild(df):
odf = pd.DataFrame()
odf['c_hlevel']=df['h_level']
odf['c_fullname']=df['fullname']
odf['c_visualattributes']=df['has_children'].apply(lambda x: 'FAE' if x=='Y' else 'LAE')
odf['c_name']=df['Label']+' ('+df['Full_Label']+')'
odf['c_path']=df['path']
odf['c_basecode']=None
odf.c_basecode[odf['c_basecode'].isnull()]=df.Greatgrandparent_Code.str.cat(df['Type'].str.cat(df['Code'],sep=':'),sep='|')
odf.c_basecode[odf['c_basecode'].isnull()]=df.Grandparent_Code.str.cat(df['Type'].str.cat(df['Code'],sep=':'),sep='|')
if df['VStype'].notnull().any(): odf.c_basecode[df['VStype'].notnull()]=odf['c_basecode'].str.cat(df['VStype'].str.cat(df['VScode'],sep=':'),sep='|') # Support either value set codes or regular code sets
#odf.c_basecode[odf['c_basecode'].isnull()] = df['Grandparent_Code'].str.cat(df['Type'].str.cat(df['Code'], sep=':'))
odf['c_symbol']=odf['c_basecode']
odf['c_synonym_cd']='N'
odf['c_facttablecolumn']='concept_cd'
odf['c_tablename']='concept_dimension'
odf['c_columnname']='concept_path'
odf['c_columndatatype']='T' #this is not the tval/nval switch - 2/20/18 - df['vtype'].apply(lambda x: 'T' if x==2 else 'N')
odf['c_totalnum']=''
odf['c_operator']='LIKE'
odf['c_dimcode']=df['fullname']
odf['c_comment']=df['Type']
odf['c_tooltip']=df['fullname'] # Tooltip right now is just the fullname again
odf['m_applied_path']='@'
odf['c_metadataxml']=df[['vtype','Label']].apply(lambda x: mdx.genXML(mdx.mapper(x[0]),x[1]),axis=1)
return odf
""" Input a df with (minimally): two columns, 0 is c_name, 1 is code. Second argument is a root node.
Outputs an i2b2 ontology compatible df.
"""
def OntSimpleBuild(df,root):
odf = pd.DataFrame()
odf['c_name']=df.iloc[:,0]
odf['c_fullname']='\\'+root+'\\'+df.iloc[:,1]+'\\'
odf['c_hlevel']=1
odf['c_visualattributes']='LAE'
odf['c_path']='\\'+root+'\\'
odf['c_basecode']=root+':'+df.iloc[:,1]
odf['c_symbol']=df.iloc[:,1]
odf['c_synonym_cd']='N'
odf['c_facttablecolumn']='concept_cd'
odf['c_tablename']='concept_dimension'
odf['c_columnname']='concept_path'
odf['c_columndatatype']='T'
odf['c_totalnum']=''
odf['c_operator']='LIKE'
odf['c_dimcode']=odf['c_fullname']
odf['c_comment']=''
odf['c_tooltip']=odf['c_fullname'] # Tooltip right now is just the fullname again
odf['m_applied_path']='@'
odf['c_metadataxml']=''#df[['vtype','Label']].apply(lambda x: mdx.genXML(mdx.mapper(x[0]),x[1]),axis=1)
return odf
def mergeValueSets(df,df_vs):
dfm = df[df.Type=='row'].merge(df_vs,left_on='Code',right_on='Row ID')
dfm['Row ID']=dfm['Row ID']+':'+dfm['Line NBR']
dfm['Line NBR']='valueset'
dfm['Full_Label']=dfm['Custom List Value TXT']
dfm['Label']=dfm['Custom List Map Value TXT']
cols=dfm.columns
dfm=dfm[cols[[0,1,-4,-3]].append(cols[2:-4])] # Rearrange to get the new row codes at the front
dfm=dfm.rename(columns={'Row ID':'VScode','Line NBR':'VStype'})
dfm.has_children='N' # These are the nodes!
df['VScode']=np.nan # All this cleaning on the original df is necessary because pandas inexplicably reorders all the rows if the names don't all match
df['VStype']=np.nan
df=df[df.columns[[0,1,-2,-1]].append(df.columns[2:-2])]
return | pd.concat([df,dfm]) | pandas.concat |
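# A minimal usage sketch of OntSimpleBuild with an invented two-column frame; the root
# code 'NOTETYPE' is a placeholder, not one of the hard-coded inputs above.
import pandas as pd

simple = pd.DataFrame({'name': ['Progress note', 'Discharge summary'],
                       'code': ['PROGRESS', 'DISCHARGE']})
ont = OntSimpleBuild(simple, root='NOTETYPE')
print(ont[['c_fullname', 'c_basecode', 'c_name']])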
import pandas as pd
from psychometrics.CTT import examinee_score
from psychometrics.reliability import k_and_k, item_var_sum, total_var
#todo DOMC
#todo identify poorly performing items
def latency_analysis(data, item_id='item_id', latency='latency'):
latencies = data.groupby(item_id)[latency].mean()
return latencies
def option_analysis(data, distractor_correct='distractor_correct', total_score='total_score'):
distractor_data = []
for item in data['item_id'].unique():
df_item = data[data['item_id'] == item]
distractor_list = df_item['distractor_id'].unique()
for distractor in distractor_list:
new_data = {'item_id': item,
'distractor_id': distractor}
df_distractor = df_item[df_item['distractor_id'] == distractor]
correlation = df_distractor[distractor_correct].corr(df_distractor[total_score])
new_data['correlation'] = correlation
distractor_data.append(new_data)
df_distractor_data = pd.DataFrame(distractor_data)
return df_distractor_data
def get_p_values(data):
'''
returns p-values for every item in the dataframe
:param data: a pandas dataframe where columns are items and rows are examinees.
:return: a vector of p-values for each item in the assessment.
'''
p_values = pd.DataFrame(data.mean(axis=0))
p_values.columns = ['P_Value']
p_values['Item'] = p_values.index
return p_values
def alpha_without_item(items):
alpha_without_items = pd.Series(index=items.columns, name='Alpha Without Item')
for item in items.columns:
use_items = items.drop(item, axis=1)
        alpha_without_items.loc[item] = k_and_k(use_items) * (1 - (item_var_sum(use_items) / total_var(use_items)))
return alpha_without_items
def discrimination_index(items):
'''
    Calculates the point biserial and biserial for each item on the exam. Essentially these are item-total correlations where the item is not included in the total score (point-biserial) and is included in the total score (biserial).
:param items: a pandas dataframe with columns for each item and rows for each examinee.
:return: two dataframes. one containing the point-biserials and the other containing the biserials
'''
stat_without_item = | pd.Series(index=items.columns, name='Point Biseral') | pandas.Series |
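# A usage sketch on a tiny invented response matrix (1 = correct, 0 = incorrect);
# alpha_without_item additionally assumes the psychometrics.reliability helpers import.
import pandas as pd

responses = pd.DataFrame({'item1': [1, 1, 0, 1],
                          'item2': [0, 1, 0, 1],
                          'item3': [1, 1, 1, 0]})
print(get_p_values(responses))
print(alpha_without_item(responses))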
import logging
from urllib.error import URLError
import pandas as pd
from pvlib import iotools
from solarforecastarbiter.io.reference_observations import (
common, default_forecasts)
solrad_variables = ['ghi', 'dni', 'dhi']
SOLRAD_FTP_DIR = "ftp://aftp.cmdl.noaa.gov/data/radiation/solrad/"
REALTIME_URL = SOLRAD_FTP_DIR + "/realtime/{abrv}/{abrv}{year_2d}{jday}.dat"
ARCHIVE_URL = SOLRAD_FTP_DIR + "/{abrv}/{year}/{abrv}{year_2d}{jday}.dat"
logger = logging.getLogger('reference_data')
def fetch(api, site, start, end, realtime=False):
"""Retrieve observation data for a solrad site between start and end.
Parameters
----------
api : io.APISession
An APISession with a valid JWT for accessing the Reference Data
user.
site : datamodel.Site
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
realtime : bool
Whether or not to look for realtime data. Note that this data is
raw, unverified data from the instruments.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
"""
if realtime:
url_format = REALTIME_URL
else:
url_format = ARCHIVE_URL
# load extra parameters for api arguments.
extra_params = common.decode_extra_parameters(site)
abbreviation = extra_params['network_api_abbreviation']
single_day_dfs = []
for day in pd.date_range(start, end):
filename = url_format.format(abrv=abbreviation,
year=day.year,
year_2d=day.strftime('%y'),
jday=day.strftime('%j'))
logger.info(f'Requesting data for SOLRAD site {site.name}'
f' on {day.strftime("%Y%m%d")}.')
try:
# Only get dataframe from the returned tuple
solrad_day = iotools.read_solrad(filename)
except URLError:
logger.warning(f'Could not retrieve SOLRAD data for site '
f'{site.name} on {day.strftime("%Y%m%d")}.')
logger.debug(f'Failed SOLRAD URL: {filename}.')
continue
else:
single_day_dfs.append(solrad_day)
try:
all_period_data = pd.concat(single_day_dfs)
except ValueError:
logger.warning(f'No data available for site {site.name} '
f'from {start} to {end}.')
return | pd.DataFrame() | pandas.DataFrame |
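# A standalone sketch of the archive-URL construction used in fetch(); the station
# abbreviation 'tbl' and the date are placeholders.
import pandas as pd

day = pd.Timestamp('2021-06-01')
url = ARCHIVE_URL.format(abrv='tbl', year=day.year,
                         year_2d=day.strftime('%y'), jday=day.strftime('%j'))
print(url)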