import pandas as pd
import hashlib
import statsmodels.stats.multitest as multitest
import scipy as sp
import scipy.stats
import numpy as np
from collections import namedtuple
import warnings
from tqdm import tqdm
# from .annotations import ProteinId, PeptideId, PeptideVariantId
# from tqdm import tqdm
# from modlamp.sequences import Helices
# from modlamp.descriptors import PeptideDescriptor, GlobalDescriptor
################################################################################
# namedtuples
################################################################################
TTestResult = namedtuple("TTestResult", ("pvalue", "statistic", "ratio"))
PeptideVariantId = namedtuple('PeptideVariantId',
('protein_id', 'start', 'stop', 'mod_seq', 'origin'))
PeptideId = namedtuple('PeptideId', ('protein_id', 'start', 'stop'))
ProteinId = namedtuple('ProteinId', ('protein_id',))
################################################################################
# Pandas peptidomics Extensions
################################################################################
@pd.api.extensions.register_index_accessor("peptidomics")
class PandasPeptidomicsIndex:
def __init__(self, index):
self.index = index
id_classes = (PeptideVariantId, PeptideId, ProteinId)
id_levels = ("peptide_variants", "peptides", "proteins")
for id_class, level in zip(id_classes, id_levels):
if id_class._fields == tuple(self.index.names[1:]):
self.id_class = id_class
self.level = level
break
else: # no break
raise ValueError("index does not seem to be 'proteins', 'peptides' or "
"peptide_variants")
def to_proteins(self):
if self.level == 'peptides':
drop = ("start", "stop")
elif self.level == "peptide_variants":
drop = ("start", "stop", "mod_seq", "origin")
else:
raise ValueError("index does not seem to be 'peptides' or 'peptide_variants'")
return self._drop_index(drop)
def to_peptides(self):
if self.level != 'peptide_variants':
raise ValueError("can only craete peptide index from a peptide_variant index")
return self._drop_index(("mod_seq", "origin"))
def _drop_index(self, drop):
index = self.index.copy()
for level in drop:
index = index.droplevel(level)
return index.unique()
def iter_index(self):
for row in self.index:
yield row[0], self.id_class(*row[1:])
def create_annotation_df(self):
annotation_columns = pd.MultiIndex.from_tuples((), names=['annotation', 'group'])
return pd.DataFrame(columns=annotation_columns, index=self.index)
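# Illustrative usage sketch (not part of the original module; it assumes a MultiIndex whose
# first level is e.g. 'campaign_id' followed by the PeptideVariantId fields defined above):
#
#   idx = pd.MultiIndex.from_tuples(
#       [("camp1", "P12345", 1, 10, "PEPTIDE", "observed")],
#       names=["campaign_id"] + list(PeptideVariantId._fields))
#   df = pd.DataFrame({"score": [0.9]}, index=idx)
#   peptide_idx = df.index.peptidomics.to_peptides()   # drops 'mod_seq' and 'origin'
#   protein_idx = df.index.peptidomics.to_proteins()   # additionally drops 'start' and 'stop'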
@pd.api.extensions.register_series_accessor("peptidomics")
class PandasPeptidomicsSerie:
def __init__(self, series):
self.series = series
def fdr(self, alpha=0.05, method='fdr_bh'):
if method != "fdr_bh":
raise NotImplementedError("TODO!!!")
mask = self.series == self.series  # True where values are not NaN (NaN != NaN)
corrected = np.full(self.series.shape, np.nan)
corrected[mask] = multitest.multipletests(self.series[mask], method=method, alpha=alpha)[1]
return pd.Series(corrected, index=self.series.index, name=("FDR", self.series.name[1]))
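# Illustrative sketch (assumes the series is a p-value column of the annotation frames used
# here, i.e. its name is an ("annotation", "group") tuple):
#
#   pvals = pd.Series([0.01, 0.20, np.nan, 0.03], name=("P-Value", "groupA"))
#   fdr = pvals.peptidomics.fdr(alpha=0.05)   # Benjamini-Hochberg corrected values
#   # NaN p-values stay NaN; fdr.name == ("FDR", "groupA")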
@pd.api.extensions.register_dataframe_accessor("peptidomics")
class PandasPeptidomics:
rename_header = {"end": "stop", "begin": "start", "prot_acc": "protein_id",
"pep_start": "start", "pep_end": "stop", "pep_mod_seq": "mod_seq",
"pep_seq": "seq"}
def __init__(self, df):
self.df = df
def create_annotation(self):
annotation_columns = pd.MultiIndex.from_tuples((), names=['annotation', 'group'])
return pd.DataFrame(columns=annotation_columns, index=self.df.index)
def add_annotation(self, annotation_series):
self.df[annotation_series.name] = annotation_series
@classmethod
def load_ppv_file(cls, ppv_file, campaign_id, index=None):
print('Load ppv file')
ppv = pd.read_csv(ppv_file, sep="\t")
ppv.rename(columns=cls.rename_header, inplace=True)
ppv["campaign_id"] = campaign_id
ppv["origin"] = "collapsed"
if index is None:
index = ['campaign_id'] + list(PeptideVariantId._fields)
return pd.pivot_table(ppv, index=index, values="score")
def _ppv_pval_combine(self, subset, compare, pvalue_name="P-Value", ratio_name="Ratio"):
gt = subset[subset[ratio_name, compare] >= 0][pvalue_name, compare] / 2
lt = subset[subset[ratio_name, compare] <= 0][pvalue_name, compare] / 2
gt_flip = 1 - gt
lt_flip = 1 - lt
pval_gt = np.append(gt.values, lt_flip.values)
pval_lt = np.append(lt.values, gt_flip.values)
p1 = sp.stats.combine_pvalues(pval_gt)[1]
p2 = sp.stats.combine_pvalues(pval_lt)[1]
pval = min(p1, p2) * 2
return pval
def ppv_pval(self, df_observed, compare, method="fisher",
annotation_name="P-Value", progresbar=True):
if method != "fisher":
raise NotImplementedError("Only fishers method is implemented")
_iter = (self._ppv_pval_combine(subset, compare)
for subset in self._iter_ppvs(df_observed))
if progresbar:
_iter = tqdm(_iter, "Collapsing p-values from UPF", total=self.df.shape[0])
return pd.Series(list(_iter), index=self.df.index, name=(annotation_name, compare))
def _ppv_intensity_combine(self, subset, group):
return pd.Series(subset[group].values.flatten()).mean()
def ppv_intensity(self, df_observed, group, annotation_name="Intensity", progresbar=True):
_iter = (self._ppv_intensity_combine(subset, group)
for subset in self._iter_ppvs(df_observed))
if progresbar:
_iter = tqdm(_iter, "Collapsing intensities from UPF", total=self.df.shape[0])
return pd.Series(list(_iter), index=self.df.index, name=(annotation_name, group))
def _iter_ppvs(self, df):
for index, ppv_data in self.df.iterrows():
campaign_id, protein_id, start, stop, mod_seq, origin = index
subset = df.query('protein_id == "{}" and start >= {} and stop <= {}'.format(
protein_id, start, stop))
yield subset
def ppv_ratio(self, df_observed, compare, annotation_name="Ratio", progresbar=True):
_iter = (subset[annotation_name, compare].median()
for subset in self._iter_ppvs(df_observed))
if progresbar:
_iter = tqdm(_iter, "Annotating ratio from UPF", total=self.df.shape[0])
return pd.Series(list(_iter), index=self.df.index, name=(annotation_name, compare))
@classmethod
def load_upf_meta(cls, upf_file, meta_file, campaign_id, upf_file_sep="\t", meta_file_sep="\t",
left_on='rs_acc', right_on='accno', pivot=True, add_meta_defaults=True,
**kwargs):
# TODO: campaign ID should be inferred from dataset.meta
# print('Load, merge and create peptide id')
upf = pd.read_csv(upf_file, sep=upf_file_sep)
upf.rename(columns=cls.rename_header, inplace=True)
meta = cls._load_meta_from_file(meta_file, meta_file_sep, add_meta_defaults)
upf_sample = pd.merge(meta, upf, left_on=left_on, right_on=right_on)
upf_sample["campaign_id"] = campaign_id
upf_sample["origin"] = "observed"
if pivot:
return cls.ms_pivot_table(upf_sample, **kwargs)
return upf_sample
@classmethod
def _load_meta_from_file(cls, meta_file, sep, add_defaults=True):
meta = pd.read_csv(meta_file, sep=sep)
if not add_defaults:
return meta
if 'subject' not in meta.columns or meta['subject'].dropna().shape[0] == 0:
# if there is no subject info assume that each sample is a unique subject
meta["subject"] = meta["ps_acc"]
if "qc" not in meta.columns or meta['qc'].dropna().shape[0] == 0:
# if there is no qc info, all samples were probably fine :D
meta["qc"] = "OK"
return meta
@classmethod
def ms_pivot_table(cls, df, values='intensity',
index=['campaign_id', 'protein_id', 'start', 'stop', 'mod_seq', 'origin'],
columns=['subject', 'group', 'qc']):
return pd.pivot_table(df, values=values, index=index, columns=columns)
def normalize(self, full_rank='auto', full_rank_cutoff=100):
raw_data = self.df.replace(0, np.nan)
log10_data = np.log10(raw_data)
log10_data_full_rank = log10_data.dropna()
rank = log10_data_full_rank.shape[0]
if full_rank == 'auto':
full_rank = rank >= full_rank_cutoff
if full_rank:
if rank < full_rank_cutoff:
msg = ("The full rank of the normalization matrix is {}, consider "
"consider running df.peptidomics.normalize(full_rank=False) instead")
warnings.warn(msg.format(rank))
median_intensities = log10_data_full_rank.median()
else:
median_intensities = log10_data.median()
norm_target = median_intensities.mean()
normalization_factors = norm_target - median_intensities
# ax = full_rank_median_intensities.plot(kind="hist",
# title="Median Intensity across samples")
return 10 ** (log10_data + normalization_factors)
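# Minimal usage sketch (toy intensities): normalization shifts each sample in log10 space so
# that all sample medians meet at their common mean, then transforms back.
#
#   df = pd.DataFrame({"s1": [10.0, 100.0, 1000.0], "s2": [20.0, 200.0, 2000.0]})
#   normalized = df.peptidomics.normalize(full_rank=False)
#   # after normalization both columns share the same median intensity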
@classmethod
def _ttest_return(cls, t_test, index, ratios):
pvalue = pd.Series(t_test.pvalue.data, index=index, name="P-Value")
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 10:08:35 2020
@author: suyu
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import mean_squared_error,roc_auc_score,mean_absolute_error,log_loss
from sklearn.preprocessing import MinMaxScaler
import sys
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
sys.path.append('../')
from gammli.DataReader import data_initialize
sys.path.append('benchmark/deepfm' )
from fmDataReader import FeatureDictionary, DataParser
from DeepFM import DeepFM
def deepfm_fm(wc, data, meta_info_ori, task_type="Regression", random_state=0, params=None):
train, test = train_test_split(data, test_size=0.2, random_state=0)
tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=True)
epochs = params['epochs']
loss_type = params['loss_type']
eval_metric = params['eval_metric']
greater_is_better = params['greater_is_better']
verbose = params['verbose']
early_stopping = params['early_stopping']
NUMERIC_COLS = []
CATEGORICAL_COLS = []
IGNORE_COLS = []
for i, (key, item) in enumerate(meta_info.items()):
if item['type'] == "categorical":
if len(meta_info[key]['values']) ==2:
NUMERIC_COLS.append(key)
else:
CATEGORICAL_COLS.append(key)
elif item['type'] == "target":
IGNORE_COLS.append(key)
else:
NUMERIC_COLS.append(key)
# params
dfm_params = {
"embedding_size": 3,
"deep_layers": [32, 32],
"use_deep" : True ,
"use_fm" : True ,
"deep_layers_activation": tf.nn.relu,
"loss_type" : loss_type,
"epoch": epochs ,
"batch_size": 1000,
"learning_rate": 0.001,
"optimizer_type": "adam",
"batch_norm": 0,
"batch_norm_decay": 0.995,
"l2_reg": 0.1,
"greater_is_better" : greater_is_better,
"verbose": verbose,
"eval_metric": eval_metric,
"random_seed": random_state
}
def _run_base_model_dfm(dfTrain, dfTest, folds, dfm_params):
fd = FeatureDictionary(dfTrain=dfTrain, dfTest=dfTest,
numeric_cols=NUMERIC_COLS,
ignore_cols=IGNORE_COLS)
data_parser = DataParser(feat_dict=fd)
Xi_train, Xv_train, y_train = data_parser.parse(df=dfTrain, has_label=True)
Xi_test, Xv_test, ids_test,idv_test = data_parser.parse(df=dfTest)
dfm_params["feature_size"] = fd.feat_dim
#print(fd.feat_dict)
dfm_params["field_size"] = len(Xi_train[0])
print(dfm_params)
y_train_meta = np.zeros((dfTrain.shape[0], 1), dtype=float)
y_test_meta = np.zeros((dfTest.shape[0], 1), dtype=float)
_get = lambda x, l: [x[i] for i in l]
#gini_results_cv = np.zeros(len(folds), dtype=float)
#gini_results_epoch_train = np.zeros((len(folds), dfm_params["epoch"]), dtype=float)
#gini_results_epoch_valid = np.zeros((len(folds), dfm_params["epoch"]), dtype=float)
y_train = list(map(float,y_train))
for i, (train_idx, valid_idx) in enumerate(folds):
Xi_train_, Xv_train_, y_train_ = _get(Xi_train, train_idx), _get(Xv_train, train_idx), _get(y_train, train_idx)
Xi_valid_, Xv_valid_, y_valid_ = _get(Xi_train, valid_idx), _get(Xv_train, valid_idx), _get(y_train, valid_idx)
dfm = DeepFM(**dfm_params)
dfm.fit(Xi_train_, Xv_train_, y_train_, Xi_valid_, Xv_valid_, y_valid_,early_stopping=early_stopping)
y_train_meta[valid_idx,0] = dfm.predict(Xi_valid_, Xv_valid_)
y_test_meta[:,0] += dfm.predict(Xi_test, Xv_test)
#gini_results_cv[i] = mean_absolute_error(y_valid_, y_train_meta[valid_idx])
#gini_results_epoch_train[i] = dfm.train_result
#gini_results_epoch_valid[i] = dfm.valid_result
y_test_meta /= float(len(folds))
return y_train_meta, y_test_meta
if task_type == "Regression":
cold_mae = []
cold_rmse = []
warm_mae = []
warm_rmse = []
def model_choose(deep):
dfm_params['use_deep']=deep
for times in range(10):
train, test = train_test_split(data, test_size=0.2, random_state=times)
tr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info, sy, sy_t = data_initialize(train, test, meta_info_ori, task_type, 'warm', random_state=0, verbose=False)
train_x = np.concatenate([tr_x,val_x],0)
train_y = np.concatenate([tr_y,val_y],0)
train_y = sy.inverse_transform(train_y)
te_y = sy_t.inverse_transform(te_y)
train_Xi = np.concatenate([tr_Xi,val_Xi],0)
train_ = np.concatenate([train_x,train_Xi,train_y],1)
test_ = np.concatenate([te_x,te_Xi,te_y],1)
dfTrain = pd.DataFrame(train_, columns=train.columns)
import os
import time
import pandas as pd
import scipy
import seaborn as sns
from matplotlib import pyplot as plt
# import warnings
from kl_simulation_support import *
from kl_simulation_support import grow_ode
from scipy.stats import entropy
# warnings.filterwarnings(action="error", category=np.ComplexWarning)
def sigmoid_switch(mu, mumax, H0, beta, musens, noiserange):
return noiserange * H0 * (mumax - mu) ** beta / ((mumax - musens) ** beta + (mumax - mu) ** beta) + H0
def exponential_mudep(mu, mumin, H0, beta, noiserange):
return noiserange * H0 * np.exp(-beta * (mu - mumin)) + H0
def linear_mudep(mu, mumin, mumax, H0, noiserange):
return H0 + ((mumax - mu) / (mumax - mumin)) * noiserange * H0
def switching_mat(mat_fit, num_env, num_phen, switch_rate=0.1, mu_dep=False, noiserange=4, beta=1, musens=0.5, mumax=2,
mumin=0, dependency='sigmoid', sensing_cost=0, switch_matrices=None):
"""
Function which creates a switching matrix. This matrix is combined with the fitness matrix and
returned as one matrix.
:param musens:
:param mumax:
:param noiserange:
:param mat_fit: growth rate matrix
:param num_env: number of environments
:param num_phen: number of phenotypes
:param switch_rate: switching rate
:param mu_dep: boolean to indicate whether the switching rate should depend on mu
:param beta: constant for tuning sensitivity to mu in case of mu dependency
:return a: growth/switching matrix
"""
# First set all away-switching rates, then sum them in diagonal
cost_mat = np.zeros((num_env, num_phen, num_phen))
if switch_matrices is not None:
return mat_fit + switch_matrices - cost_mat
if mu_dep:
if dependency == 'sigmoid':
mat_switch = np.zeros((num_env, num_phen, num_phen))
if np.isscalar(switch_rate) | (len([switch_rate]) == 1):
for i in range(num_env):
for j in range(num_phen):
mat_switch[i, :, j] = sigmoid_switch(mat_fit[i, j, j], mumax, switch_rate, beta, musens,
noiserange)
mat_switch[i, j, j] = -(num_phen - 1) * mat_switch[i, j, j]
elif dependency == 'exponential':
mat_switch = np.zeros((num_env, num_phen, num_phen))
if np.isscalar(switch_rate) | (len([switch_rate]) == 1):
for i in range(num_env):
for j in range(num_phen):
mat_switch[i, :, j] = exponential_mudep(mat_fit[i, j, j], mumin, switch_rate, beta, noiserange)
mat_switch[i, j, j] = -(num_phen - 1) * mat_switch[i, j, j]
elif dependency == 'linear':
mat_switch = np.zeros((num_env, num_phen, num_phen))
if np.isscalar(switch_rate) | (len([switch_rate]) == 1):
for i in range(num_env):
for j in range(num_phen):
mat_switch[i, :, j] = linear_mudep(mat_fit[i, j, j], mumin, mumax, switch_rate, noiserange)
mat_switch[i, j, j] = -(num_phen - 1) * mat_switch[i, j, j]
elif dependency == 'sensing':
mat_switch = np.zeros((num_env, num_phen, num_phen))
if np.isscalar(switch_rate) | (len([switch_rate]) == 1):
for i in range(num_env):
np.fill_diagonal(cost_mat[i], sensing_cost)
fast_phen = np.argmax(np.diagonal(mat_fit[i]))
np.fill_diagonal(mat_switch[i], -switch_rate)
mat_switch[i, fast_phen, :] += switch_rate
else:
if np.isscalar(switch_rate) | (len([switch_rate]) == 1):
mat_switch = np.ones((num_env, num_phen, num_phen)) * switch_rate
for i in range(num_env):
np.fill_diagonal(mat_switch[i], -(num_phen - 1) * switch_rate)
return mat_fit + mat_switch - cost_mat
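# Minimal usage sketch (illustrative numbers): two environments, two phenotypes, each phenotype
# growing fastest in "its own" environment, combined with a constant (mu-independent) switch rate.
#
#   num_env, num_phen = 2, 2
#   fit_mat = np.zeros((num_env, num_phen, num_phen))
#   fit_mat[0, 0, 0], fit_mat[0, 1, 1] = 2.0, 0.1
#   fit_mat[1, 0, 0], fit_mat[1, 1, 1] = 0.1, 2.0
#   a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=0.1, mu_dep=False)
#   # a_mat[env]: growth rate minus total out-switching on the diagonal, switch rates off-diagonal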
def kl_optimisation(fit_mat, x_0, num_env, num_phen, env_seqs, total_times, mean_mu_max, switch_rate=0.1, mu_dep=False,
max_switch_rate=1, beta=1, iterations=10, switch_basis='away'):
"""
:param fit_mat:
:param b:
:param x_0:
:param max_mus:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param seed:
:param max_switch_rate:
:param beta:
:param min_sim_time:
:param avg_env_length:
:param iterations:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep,
max_switch_rate=max_switch_rate, beta=beta, switch_basis=switch_basis)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
mean_mu = np.zeros(iterations)
for i in range(iterations):
start = time.time()
x_t_fracs = np.zeros((num_phen, len(env_seqs[i][0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seqs[i][0])) # array to store mean and max growth rate after each environment
# solve system for specific sequence of environments
for ind, env in enumerate(env_seqs[i][0]):
c_scale = np.dot(m_inverse[env], x_t_fracs[:, ind])
x_t_fracs[:, ind + 1], mu[ind], extinction = grow(eig_vecs[env], eig_vals[env], c_scale,
env_seqs[i][1][ind])
if extinction:
return 1
mean_mu[i] = sum(mu * env_seqs[i][1]) / total_times[i]
print('environment sequence ' + str(i) + ' finished in ' + str(time.time() - start) + ' seconds')
frac_max_mu = mean_mu / mean_mu_max
return 1 - np.average(frac_max_mu)
def kl_simulation(fit_mat, x_0, num_env, num_phen, env_seq, total_time, mean_mu_max, switch_rate=0.1, mu_dep=False,
noiserange=4, beta=1, musens=0.5, mumax=2, dependency='sigmoid', sensing_cost=0,
switch_matrices=None):
"""
:param total_time:
:param mean_mu_max:
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, sensing_cost=sensing_cost,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, dependency=dependency,
switch_matrices=switch_matrices)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
growthcosts = np.zeros(num_env)
for env in range(num_env):
try:
growthcosts[env] = (np.max(fit_mat[env]) - eig_vals[env].max()) # /fit_mat[env,env,env]
except:
max_eig_val = eig_vals[env].max()
if np.imag(max_eig_val) < 1e-10:
growthcosts[env] = (np.max(fit_mat[env]) - np.real(max_eig_val)) # /fit_mat[env,env,env]
else:
growthcosts[env] = (np.max(fit_mat[env]) - eig_vals[env].max()) # /fit_mat[env,env,env]
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seq[0])) # array to store mean and max growth rate after each environment
x_t_fracs2 = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs2[:, 0] = x_0
# Initialize array for storing results
results = np.zeros((4, len(env_seq[0])))
# solve system for specific sequence of environments
for ind_env, env in enumerate(env_seq[0]):
c_scale = np.dot(m_inverse[env], x_t_fracs[:, ind_env])
x_t_fracs[:, ind_env + 1], mu[ind_env], extinction, lag = grow_reportlag(eig_vecs[env],
eig_vals[env],
c_scale,
env_seq[1][ind_env])
results[0, ind_env] = ind_env
results[1, ind_env] = lag
results[2, ind_env] = growthcosts[env]
results[3, ind_env] = (mu[ind_env] * lag) / np.log(2)
if extinction:
continue
mean_mu = sum(mu * env_seq[1]) / total_time
frac_max_mu = mean_mu / mean_mu_max
results_df = pd.DataFrame(data=np.transpose(results), columns=['envnumber', 'lag', 'growthcost', 'lag_gen'])
results_df['meanmu'] = mean_mu
results_df['frac_max_mu'] = frac_max_mu
return frac_max_mu, results_df
def kl_simulation_ode(fit_mat, x_0, num_env, num_phen, env_seq, total_time, mean_mu_max, switch_rate=0.1, mu_dep=False,
noiserange=4, beta=1, musens=0.5, mumax=2, dependency='sigmoid', sensing_cost=0,
switch_matrices=None):
"""
:param total_time:
:param mean_mu_max:
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, sensing_cost=sensing_cost,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, dependency=dependency,
switch_matrices=switch_matrices)
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seq[0])) # array to store mean and max growth rate after each environment
# Initialize array for storing results
results = np.zeros((1, len(env_seq[0])))
# solve system for specific sequence of environments
for ind_env, env in enumerate(env_seq[0]):
x_t_fracs[:, ind_env + 1], mu[ind_env], extinction = grow_ode(a_mat[env], x_t_fracs[:, ind_env],
env_seq[1][ind_env])
results[0, ind_env] = ind_env
if extinction:
continue
mean_mu = sum(mu * env_seq[1]) / total_time
frac_max_mu = mean_mu / mean_mu_max
results_df = pd.DataFrame(data=np.transpose(results), columns=['envnumber'])
results_df['meanmu'] = mean_mu
results_df['frac_max_mu'] = frac_max_mu
return frac_max_mu, results_df
def kl_simulation_IR(fit_mat, x_0, num_env, num_phen, env_seq, total_time, mean_mu_max,
switch_rate=0.1, mu_dep=False,
noiserange=4, beta=1, musens=0.5, mumax=2, dependency='sigmoid',
sensing_cost=0, switch_matrices=None):
"""
:param total_time:
:param mean_mu_max:
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, sensing_cost=sensing_cost,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, dependency=dependency,
switch_matrices=switch_matrices)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
growthcosts = np.zeros(num_env)
for env in range(num_env):
try:
growthcosts[env] = (np.max(fit_mat[env]) - eig_vals[env].max()) # /fit_mat[env,env,env]
except:
max_eig_val = eig_vals[env].max()
if np.imag(max_eig_val) < 1e-10:
growthcosts[env] = (np.max(fit_mat[env]) - np.real(max_eig_val)) # /fit_mat[env,env,env]
else:
growthcosts[env] = (np.max(fit_mat[env]) - eig_vals[env].max()) # /fit_mat[env,env,env]
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seq[0])) # array to store mean and max growth rate after each environment
# Initialize array for storing results
results = np.zeros((4, len(env_seq[0])))
# solve system for specific sequence of environments
x_init = x_0
for ind_env, env in enumerate(env_seq[0]):
c_scale = np.dot(m_inverse[env], x_init)
x_t_fracs[:, ind_env + 1], mu[ind_env], extinction, lag = grow_reportlag(eig_vecs[env],
eig_vals[env],
c_scale,
env_seq[1][ind_env])
results[0, ind_env] = ind_env
results[1, ind_env] = lag
results[2, ind_env] = growthcosts[env]
results[3, ind_env] = (mu[ind_env] * lag) / np.log(2)
end_fracs = x_t_fracs[:, ind_env + 1]
opt_phen = np.argmax(np.diag(fit_mat[env]))
x_adapted = end_fracs[opt_phen]
x_init = np.ones(num_phen) * ((1 - x_adapted) / (num_phen - 1))
x_init[opt_phen] = x_adapted
if extinction:
continue
mean_mu = sum(mu * env_seq[1]) / total_time
frac_max_mu = mean_mu / mean_mu_max
results_df = pd.DataFrame(data=np.transpose(results), columns=['envnumber', 'lag', 'growthcost', 'lag_gen'])
results_df['meanmu'] = mean_mu
results_df['frac_max_mu'] = frac_max_mu
return frac_max_mu, results_df
def kl_optimisation_new(fit_mat, x_0, num_env, num_phen, env_seq, total_time, mean_mu_max, switch_rate=0.1,
mu_dep=False, dependency='sigmoid', mumin=0, sensing_cost=0,
noiserange=4, beta=1, musens=0.5, mumax=2):
"""
:param total_time:
:param mean_mu_max:
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, dependency=dependency,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, mumin=mumin,
sensing_cost=sensing_cost)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seq[0])) # array to store mean and max growth rate after each environment
# solve system for specific sequence of environments
for ind_env, env in enumerate(env_seq[0]):
c_scale = np.dot(m_inverse[env], x_t_fracs[:, ind_env])
x_t_fracs[:, ind_env + 1], mu[ind_env], extinction = grow(eig_vecs[env], eig_vals[env], c_scale,
env_seq[1][ind_env])
if extinction:
return 0
mean_mu = sum(mu * env_seq[1]) / total_time
frac_max_mu = mean_mu / mean_mu_max
return frac_max_mu
def kl_optimisation_new_IR(fit_mat, x_0, num_env, num_phen, env_seq, total_time, mean_mu_max, switch_rate=0.1,
mu_dep=False, dependency='sigmoid', mumin=0, sensing_cost=0,
noiserange=4, beta=1, musens=0.5, mumax=2):
"""
:param total_time:
:param mean_mu_max:
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, dependency=dependency,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, mumin=mumin,
sensing_cost=sensing_cost)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seq[0])) # array to store mean and max growth rate after each environment
# solve system for specific sequence of environments
x_init = x_0
for ind_env, env in enumerate(env_seq[0]):
c_scale = np.dot(m_inverse[env], x_init)
x_t_fracs[:, ind_env + 1], mu[ind_env], extinction = grow(eig_vecs[env], eig_vals[env], c_scale,
env_seq[1][ind_env])
if extinction:
return 0
end_fracs = x_t_fracs[:, ind_env + 1]
opt_phen = np.argmax(np.diag(fit_mat[env]))
x_adapted = end_fracs[opt_phen]
x_init = np.ones(num_phen) * ((1 - x_adapted) / (num_phen - 1))
x_init[opt_phen] = x_adapted
mean_mu = sum(mu * env_seq[1]) / total_time
frac_max_mu = mean_mu / mean_mu_max
return frac_max_mu
def get_mu_pdf(fit_mat, x_0, num_env, num_phen, env_seq, total_time, mean_mu_max, switch_rate=0.1, mu_dep=False,
noiserange=4, beta=1, musens=0.5, mumax=2, mumin=-0.2, n_bins=200, dependency='sigmoid', sensing_cost=0):
"""
:param total_time:
:param mean_mu_max:
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, dependency=dependency,
mumin=mumin, sensing_cost=sensing_cost)
lowerbound = np.min(np.sum(a_mat, axis=1))
murange = mumax - lowerbound
bincenters = np.linspace(lowerbound - murange / 100, mumax + murange / 100, n_bins)
binwidth = bincenters[1] - bincenters[0]
mu_pdf = np.zeros(n_bins)
t_cur = 0
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
mu = np.zeros(len(env_seq[0])) # array to store mean and max growth rate after each environment
# solve system for specific sequence of environments
for ind_env, env in enumerate(env_seq[0]):
c_scale = np.dot(m_inverse[env], x_t_fracs[:, ind_env])
x_t_fracs[:, ind_env + 1], mu[ind_env], extinction, x_pdf, t_env = grow_reportpdf(eig_vecs[env],
eig_vals[env],
c_scale,
env_seq[1][ind_env])
mu_pdf_env = convert_xpdf_to_mupdf(x_pdf, np.sum(a_mat[env], axis=0), binwidth, bincenters)
mu_pdf = mu_pdf * (t_cur / (t_cur + t_env)) + mu_pdf_env * (t_env / (t_cur + t_env))
t_cur = t_cur + t_env
if extinction:
return 0
mean_mu = sum(mu * env_seq[1]) / total_time
return mean_mu, mu_pdf, bincenters
def get_mu_trace(fit_mat, x_0, num_env, num_phen, env_seq, switch_rate=0.1, mu_dep=False, sensing_cost=0,
noiserange=4, beta=1, musens=0.5, mumax=2, mumin=-0.2, n_bins=200, dependency='sigmoid'):
"""
:param fit_mat:
:param x_0:
:param env_seq:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
t_cur = 0
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, sensing_cost=sensing_cost,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, dependency=dependency)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
x_t_fracs = np.zeros((num_phen, len(env_seq[0]) + 1)) # initialise array to store phenotype fractions
x_t_fracs[:, 0] = x_0
# Initialize arrays to store trace data
t_trace = [0]
mu_trace = [0]
logOD_trace = [0]
x_trace = [x_0]
# solve system for specific sequence of environments
for ind_env, env in enumerate(env_seq[0]):
c_scale = np.dot(m_inverse[env], x_t_fracs[:, ind_env])
x_t_fracs[:, ind_env + 1], t_trace_env, mu_trace_env, x_trace_env = grow_reporttrace(eig_vecs[env],
eig_vals[env],
c_scale,
env_seq[1][ind_env])
timestep = 0.1
num_steps = max(int(np.ceil(env_seq[1][ind_env] / timestep)), 10)
times = np.linspace(0, env_seq[1][ind_env], num_steps)
logOD_trace_env = grow_reportlogOD(eig_vecs[env], eig_vals[env], c_scale, times)
logOD_trace_env = np.array(logOD_trace_env[1:])
t_trace.extend(t_trace_env + t_cur)
t_cur += env_seq[1][ind_env]
mu_trace.extend(mu_trace_env)
x_trace.extend(x_trace_env)
logOD_trace.extend(logOD_trace_env + logOD_trace[-1])
# t_trace = np.array(t_trace[1:])
# mu_trace = np.array(mu_trace[1:])
# x_trace = np.array(x_trace[1:])
# logOD_trace = np.array(logOD_trace[1:])
t_trace = np.array(t_trace)
mu_trace = np.array(mu_trace)
x_trace = np.array(x_trace)
logOD_trace = np.array(logOD_trace)
return t_trace, mu_trace, x_trace, logOD_trace
def get_logOD_oneenv(fit_mat, x_0, num_env, num_phen, env=0, switch_rate=0.1, mu_dep=False, dependency='sigmoid',
noiserange=4, beta=1, musens=0.5, mumax=2, times=np.linspace(0, 20), sensing_cost=0):
"""
:param fit_mat:
:param x_0:
:param num_env:
:param num_phen:
:param switch_rate:
:param mu_dep:
:param noiserange:
:param beta:
:return: deviation from maximal mean fitness as a fraction.
"""
a_mat = switching_mat(fit_mat, num_env, num_phen, switch_rate=switch_rate, mu_dep=mu_dep, dependency=dependency,
noiserange=noiserange, beta=beta, musens=musens, mumax=mumax, sensing_cost=sensing_cost)
# TODO: check how close eigenvalues are
eig_vals, eig_vecs = np.linalg.eig(a_mat)
m_inverse = np.linalg.inv(eig_vecs)
# solve system for specific sequence of environments
c_scale = np.dot(m_inverse[env], x_0)
logOD_trace = grow_reportlogOD(eig_vecs[env], eig_vals[env], c_scale, times)
max_eig = eig_vals[env].max()
return logOD_trace, max_eig
def get_pretty_pdf(bincenters, mu_pdf, smoothing=0.1, n_bins=200):
mus = bincenters
freqs = mu_pdf
bandwidth = smoothing * mus.std() * mus.size ** (-1 / 5.)
support = np.linspace(min(mus) - bandwidth, max(mus) + bandwidth, n_bins)
supp_width = support[1] - support[0]
kernels = []
for ind, mu_i in enumerate(mus):
if ind % 1000 == 0:
print("Calculating mu dist: '{0}' out of '{1}'".format(ind, len(mus)))
kernel = scipy.stats.norm(mu_i, bandwidth).pdf(support)
kernel = kernel * freqs[ind]
kernels.append(kernel)
density = np.sum(kernels, axis=0)
density /= scipy.integrate.trapz(density, support)
return support, density
def get_landscape_df(fit_mat, env_seq):
num_env = fit_mat.shape[0]
total_time = np.sum(env_seq[1])
landscape_df = pd.DataFrame(columns=['environment', 'growthrate', 'occurrence'])
for env_ind in range(num_env):
growthrates = np.diag(fit_mat[env_ind])
env_df = pd.DataFrame(data=growthrates, columns=['growthrate'])
env_df['environment'] = env_ind
times_env = [env_seq[1][ind] for ind, env in enumerate(env_seq[0]) if env == env_ind]
occurrence = np.sum(times_env) / total_time
env_df['occurrence'] = occurrence
landscape_df = landscape_df.append(env_df, sort=True)
return landscape_df
def convert_xpdf_to_mupdf(x_pdf, growthrates, binwidth, bincenters):
mu_pdf = np.zeros(len(bincenters))
x_pdf = np.real(x_pdf)
for phen, freq in enumerate(x_pdf):
corr_mu = growthrates[phen]
bin_number = np.where((bincenters - binwidth / 2 <= corr_mu) & (bincenters + binwidth / 2 >= corr_mu))[0]
mu_pdf[bin_number] += freq
return mu_pdf
def get_switch_matrix_from_vect(switch_vect, num_phen):
if int(np.sqrt(len(switch_vect))) != num_phen - 1:
raise ValueError('Not the right number of switching rates was given.')
counter = 0
switch_matrix = np.zeros((num_phen, num_phen))
for i in range(num_phen):
for j in range(num_phen):
if i != j:
switch_matrix[i, j] = switch_vect[counter]
counter += 1
return switch_matrix
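# Sketch (illustrative): the vector fills the off-diagonal entries row by row.
#
#   get_switch_matrix_from_vect([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], num_phen=3)
#   # array([[0. , 0.1, 0.2],
#   #        [0.3, 0. , 0.4],
#   #        [0.5, 0.6, 0. ]])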
def mullerplot_fig2(x_variable, y_variable, **kwargs):
ax = plt.gca()
data = kwargs.pop("data")
phen_names = kwargs.pop("phen_names")
env_seq = kwargs.pop("env_seq")
env_changes = np.cumsum(env_seq[1])
if abs(env_changes[-1] - np.max(data['time'])) < 1e-6:
env_changes = env_changes[:-1]
data = data.sort_values('time')
x = np.unique(np.sort(data['time'])).tolist()
y = []
for phen in phen_names:
phen_data = data[data['phenotype'] == phen][['fraction']].values.flatten()
y.append(phen_data)
new_colors = kwargs.pop("new_colors")
ax.stackplot(x, y, colors=new_colors, lw=0.1)
for change in env_changes:
plt.plot([change, change], [0, 1.1], color='0.9', lw=2, ls='--')
def plot_landscape_and_noiserelations(fit_mat, env_seq, res_const_x, res_lin_x, res_exp_x, res_sigm_x, mumax, mumin,
store_figs_filename=False, kinds_to_show=['const', 'lin', 'exp', 'sigm']):
kind_colors = ['#abd9e9', '#d7191c', '#2c7bb6', '#fdae61', ]
num_phen = fit_mat[0].shape[0]
num_env = fit_mat.shape[0]
if num_phen == 2:
phen_colors = sns.xkcd_palette(["greyish", "faded green"])
else:
phen_colors = sns.color_palette("cubehelix", num_phen)
phen_colors.reverse()
landscape_df = get_landscape_df(fit_mat, env_seq)
# calculate and show the optimal switching rates per growth rate
growth_rates = np.linspace(np.min(fit_mat), np.max(fit_mat), 100)
res_array = np.zeros((len(growth_rates), 5))
res_array[:, 0] = growth_rates
res_array[:, 1] = res_const_x
for i, mu in enumerate(growth_rates):
res_array[i, 2] = sigmoid_switch(mu, mumax, res_sigm_x[0], res_sigm_x[2], res_sigm_x[3],
(res_sigm_x[1] - res_sigm_x[0]) / res_sigm_x[0])
for i, mu in enumerate(growth_rates):
res_array[i, 3] = exponential_mudep(mu, mumin, res_exp_x[0], res_exp_x[2],
(res_exp_x[1] - res_exp_x[0]) / res_exp_x[0])
for i, mu in enumerate(growth_rates):
res_array[i, 4] = linear_mudep(mu, mumin, mumax, res_lin_x[0],
(res_lin_x[1] - res_lin_x[0]) / res_lin_x[0])
res_df = pd.DataFrame(data=res_array, columns=['mu', 'const', 'sigm', 'exp', 'lin'])
# traces_df_filtered = traces_df[traces_df['switching type'].isin(kinds_to_show)]
fig, ax = plt.subplots()
ax.set_xlabel('growth rate')
ax.set_ylabel('')
ax.scatter('growthrate', 'environment', s=20, c='gray', data=landscape_df, zorder=1)
ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False, labelright=False)
if num_phen == num_env:
ax.scatter(np.max(np.max(fit_mat, axis=1), axis=1), range(fit_mat.shape[0]), s=30, c=phen_colors, zorder=2)
ax2 = ax.twinx()
ax2.set_ylabel('switch rate')
for kind_ind, kind in enumerate(kinds_to_show):
ax2.plot('mu', kind, color=kind_colors[kind_ind], lw=2, data=res_df, zorder=-1)
ax2.tick_params(axis='y', which='major', left=True, right=False, labelleft=True, labelright=False)
ax2.tick_params(axis='y', which='minor', left=False, right=False, labelleft=True, labelright=False)
ax2.yaxis.set_label_position("left")
if store_figs_filename:
workingdir = os.getcwd()
filename = store_figs_filename + '_fitnesslandscape'
plt.savefig(os.path.join(workingdir, "results", filename + '.png'))
plt.savefig(os.path.join(workingdir, "results", filename + '.svg'))
def plot_mu_distributions(fit_mat, x0, num_env, num_phen, env_seq, total_time, mean_mu_max, mumax, mumin,
store_figs_filename=False,
res_const_x=[], res_lin_x=[], res_exp_x=[], res_sigm_x=[], res_sensing_x=[],
kinds_to_show=['const', 'lin', 'exp', 'sigm'], sensing_cost=0):
kind_colors = {'sensing': '#d7191c', 'lin': '#fdae61', 'const': '#abd9e9', 'exp': '#fdae61', 'sigm': '#abd9e9'}
nbins = 200
info_dict = {}
# General
res_dict = {'const': res_const_x, 'lin': res_lin_x, 'exp': res_exp_x, 'sigm': res_sigm_x, 'sensing': res_sensing_x}
dep_dict = {'const': '', 'lin': 'linear', 'exp': 'exponential', 'sigm': 'sigmoid', 'sensing': 'sensing'}
pdfs_df = pd.DataFrame(columns=['growth rate', 'probability', 'switching type'])
for sw_type in kinds_to_show:
mu_dep = (sw_type != 'const')
res = res_dict[sw_type]
dep = dep_dict[sw_type]
switch_rate = res[0]
noiserange, beta, musens = (np.nan, np.nan, np.nan)
if len(res) > 1:
noiserange = (res[1] - res[0]) / res[0]
if len(res) > 2:
beta = res[2]
if len(res) > 3:
musens = res[3]
mean_mu, mu_pdf, bincenters = get_mu_pdf(fit_mat, x0, num_env, num_phen, env_seq, total_time,
mean_mu_max, sensing_cost=sensing_cost,
switch_rate=switch_rate, mu_dep=mu_dep,
noiserange=noiserange, mumin=mumin,
beta=beta, musens=musens, mumax=mumax,
n_bins=nbins, dependency=dep)
bincenters_pretty, mu_pdf_pretty = get_pretty_pdf(bincenters, mu_pdf, smoothing=0.1, n_bins=nbins)
pdf_df = pd.DataFrame(
data=np.concatenate((bincenters_pretty[:, np.newaxis], mu_pdf_pretty[:, np.newaxis]), axis=1),
columns=['growth rate', 'probability'])
pdf_df['switching type'] = sw_type
info_dict.update({sw_type: {'mean_mu': mean_mu}})
pdfs_df = pdfs_df.append(pdf_df, ignore_index=True)
pdfs_df_filtered = pdfs_df[pdfs_df['switching type'].isin(kinds_to_show)]
# Plot the bunch
fig, ax = plt.subplots()
ax.set_xlabel('growth rate')
ax.set_ylabel('probability density')
for switch_ind, switchtype in enumerate(pdfs_df_filtered['switching type'].unique().tolist()):
ax.plot('growth rate', 'probability', color=kind_colors[switchtype], lw=2,
data=pdfs_df_filtered[pdfs_df_filtered['switching type'] == switchtype], label=switchtype)
ylims = ax.get_ylim()
for switch_ind, switchtype in enumerate(kinds_to_show):
ax.plot([info_dict[switchtype]['mean_mu'], info_dict[switchtype]['mean_mu']], [ylims[0] - 10, ylims[1] + 10],
color=kind_colors[switchtype],
lw=2, ls='--')
ax.set_ylim(ylims)
plt.legend()
if store_figs_filename:
workingdir = os.getcwd()
filename = store_figs_filename + '_mupdf'
plt.savefig(os.path.join(workingdir, "results", filename + '.png'))
plt.savefig(os.path.join(workingdir, "results", filename + '.svg'))
def plot_mu_trace(fit_mat, x0, num_env, num_phen, env_seq, mumax, mumin, res_const_x=[], res_lin_x=[], res_exp_x=[],
res_sigm_x=[], res_sensing_x=[], envs_to_show=10, kinds_to_show=['const', 'lin', 'exp', 'sigm'],
store_figs_filename=False, sensing_cost=0, phen_colors=None, avg_mus_dict=None):
env_seq = (env_seq[0][:envs_to_show], env_seq[1][:envs_to_show])
kind_colors = {'sensing': '#d7191c', 'lin': '#fdae61', 'const': '#abd9e9', 'exp': '#fdae61', 'sigm': '#abd9e9'}
blackish = '#%02x%02x%02x' % (35, 31, 32)
"""Plot traces of average mu over environment"""
traces_df = pd.DataFrame(columns=['time', 'mu', 'logOD', 'entropy', 'switching type'])
colnames = ['time']
for i in range(num_phen):
colnames.append('phenotype ' + str(i))
frac_traces_df = pd.DataFrame(columns=['time', 'switching type', 'phenotype', 'fraction'])
"""
Dataframe-like class to hold general energy-related timeseries; either volume ([MW] or
[MWh]), price ([Eur/MWh]) or both; in all cases there is a single timeseries for each.
"""
from __future__ import annotations
from . import single_helper
from .base import PfLine
from .. import changefreq
from typing import Dict, Iterable, Union
import pandas as pd
import numpy as np
class SinglePfLine(PfLine):
"""Portfolio line without children. Has a single dataframe; .children is the empty
dictionary.
Parameters
----------
data: Any
Generally: object with one or more attributes or items ``w``, ``q``, ``r``, ``p``;
all timeseries. Most commonly a ``pandas.DataFrame`` or a dictionary of
``pandas.Series``, but may also be e.g. another PfLine object.
Returns
-------
SinglePfLine
Notes
-----
* If the timeseries or values in ``data`` do not have a ``pint`` data type, the
standard units are assumed (MW, MWh, Eur, Eur/MWh).
* If the timeseries or values in ``data`` do have a ``pint`` data type, they are
converted into the standard units.
"""
def __new__(cls, data):
# Catch case where data is already a valid class instance.
if isinstance(data, SinglePfLine):
return data # TODO: return copy
# Otherwise, do normal thing.
return super().__new__(cls, data)
def __init__(self, data: Union[PfLine, Dict, pd.DataFrame, pd.Series]):
self._df = single_helper.make_dataframe(data)
# Implementation of ABC methods.
@property
def children(self) -> Dict:
return {}
@property
def index(self) -> pd.DatetimeIndex:
return self._df.index
@property
def w(self) -> pd.Series:
if self.kind == "p":
return pd.Series(np.nan, self.index, name="w", dtype="pint[MW]")
else:
return pd.Series(self.q / self.index.duration, name="w").pint.to("MW")
@property
def q(self) -> pd.Series:
if self.kind == "p":
return pd.Series(np.nan, self.index, name="q", dtype="pint[MWh]")
from _operator import itemgetter
import gc
from math import sqrt, log10
import math
import os
import pickle
import random
import time
import psutil
from nltk import tokenize as tokenise, stem
import numpy as np
import pandas as pd
class SessionKNN:
'''
SessionKNN(k, sample_size=1000, sampling='recent', similarity='cosine', title_boost=0, seq_weighting=None, idf_weight=None, pop_weight=False, pop_boost=0, artist_boost=0, remind=False, sim_cap=0, normalize=True, neighbor_decay=0, session_key = 'playlist_id', item_key= 'track_id', time_key= 'pos', folder=None, return_num_preds=500 )
Parameters
-----------
k : int
Number of neighboring session to calculate the item scores from. (Default value: 100)
sample_size : int
Defines the length of a subset of all training sessions to calculate the nearest neighbors from. (Default value: 500)
sampling : string
String to define the sampling method for sessions (recent, random). (default: recent)
similarity : string
String to define the method for the similarity calculation (jaccard, cosine, binary, tanimoto). (default: jaccard)
remind : bool
Should the last items of the current session be boosted to the top as reminders
pop_boost : int
Push popular items in the neighbor sessions by this factor. (default: 0 to leave out)
extend : bool
Add evaluated sessions to the maps
normalize : bool
Normalize the scores in the end
session_key : string
Header of the session ID column in the input file. (default: 'playlist_id')
item_key : string
Header of the item ID column in the input file. (default: 'track_id')
time_key : string
Header of the timestamp column in the input file. (default: 'pos')
'''
def __init__( self, k, sample_size=1000, sampling='recent', similarity='cosine', title_boost=0, seq_weighting=None, idf_weight=None, pop_weight=False, pop_boost=0, artist_boost=0, remind=False, sim_cap=0, normalize=True, neighbor_decay=0, session_key = 'playlist_id', item_key= 'track_id', time_key= 'pos', folder=None, return_num_preds=500 ):
self.k = k
self.sample_size = sample_size
self.sampling = sampling
self.similarity = similarity
self.pop_boost = pop_boost
self.artist_boost = artist_boost
self.title_boost = title_boost
self.seq_weighting = seq_weighting
self.idf_weight = idf_weight
self.pop_weight = pop_weight
self.session_key = session_key
self.item_key = item_key
self.time_key = time_key
self.remind = remind
self.normalize = normalize
self.sim_cap = sim_cap
self.neighbor_decay = neighbor_decay
self.return_num_preds = return_num_preds
#updated while recommending
self.session = -1
self.session_items = []
self.relevant_sessions = set()
# cache relations once at startup
self.session_item_map = dict()
self.item_session_map = dict()
self.session_time = dict()
self.folder = folder
self.sim_time = 0
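# Illustrative usage sketch (toy data; column names follow the defaults above, and the
# 'playlists' frame is assumed to carry a 'modified_at' timestamp per playlist):
#
#   actions = pd.DataFrame({'playlist_id': [1, 1, 2], 'track_id': [10, 11, 10], 'pos': [0, 1, 0]})
#   playlists = pd.DataFrame({'playlist_id': [1, 2], 'modified_at': [1000, 2000]})
#   knn = SessionKNN(k=100, sample_size=500)
#   knn.train({'actions': actions, 'playlists': playlists})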
def train( self, data, test=None ):
'''
Trains the predictor.
Parameters
--------
data: pandas.DataFrame
Training data. It contains the transactions of the sessions. It has one column for session IDs, one for item IDs and one for the timestamp of the events (unix timestamps).
It must have a header. Column names are arbitrary, but must correspond to the ones you set during the initialization of the network (session_key, item_key, time_key properties).
'''
train = data['actions']
playlists = data['playlists']
folder = self.folder
if folder is not None and os.path.isfile( folder + 'session_item_map.pkl' ):
self.session_item_map = pickle.load( open( folder + 'session_item_map.pkl', 'rb') )
self.session_time = pickle.load( open( folder + 'session_time.pkl', 'rb' ) )
self.item_session_map = pickle.load( open( folder + 'item_session_map.pkl', 'rb' ) )
else:
index_session = train.columns.get_loc( self.session_key )
index_item = train.columns.get_loc( self.item_key )
#index_time = train.columns.get_loc( self.time_key )
session = -1
session_items = set()
timestamp = -1
cnt = 0
tstart = time.time()
timemap = pd.Series( index=playlists.playlist_id, data=playlists.modified_at )
for row in train.itertuples(index=False):
# cache items of sessions
if row[index_session] != session:
if len(session_items) > 0:
self.session_item_map.update({session : session_items})
# cache the last time stamp of the session
self.session_time.update({session : timestamp})
session = row[index_session]
session_items = set()
timestamp = timemap[row[index_session]]
session_items.add(row[index_item])
# cache sessions involving an item
map_is = self.item_session_map.get( row[index_item] )
if map_is is None:
map_is = set()
self.item_session_map.update({row[index_item] : map_is})
map_is.add(row[index_session])
cnt += 1
if cnt % 100000 == 0:
print( ' -- finished {} of {} rows in {}s'.format( cnt, len(train), (time.time() - tstart) ) )
# Add the last tuple
self.session_item_map.update({session : session_items})
self.session_time.update({session : timestamp})
if folder is not None:
pickle.dump( self.session_item_map, open( folder + 'session_item_map.pkl', 'wb' ) )
pickle.dump( self.session_time, open( folder + 'session_time.pkl', 'wb' ) )
pickle.dump( self.item_session_map, open( folder + 'item_session_map.pkl', 'wb' ) )
self.item_pop = pd.DataFrame()
self.item_pop['pop'] = train.groupby( self.item_key ).size()
#self.item_pop['pop'] = self.item_pop['pop'] / self.item_pop['pop'].max()
self.item_pop['pop'] = self.item_pop['pop'] / len( train )
self.item_pop = self.item_pop['pop'].to_dict()
if self.idf_weight != None:
self.idf = pd.DataFrame()
import logging
import os
import random
import sys
import time
from itertools import chain
from collections import Iterable
import gc
import glob
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
from attrdict import AttrDict
from tqdm import tqdm
from pycocotools import mask as cocomask
from sklearn.model_selection import BaseCrossValidator
from sklearn.externals import joblib
from steppy.base import BaseTransformer, Step
from steppy.utils import get_logger
from skimage.transform import resize
import yaml
from imgaug import augmenters as iaa
import imgaug as ia
import torch
logger = get_logger()
def read_config(config_path):
with open(config_path) as f:
config = yaml.load(f)
return AttrDict(config)
def check_env_vars():
assert os.getenv('NEPTUNE_API_TOKEN'), """You must put your Neptune API token in the \
NEPTUNE_API_TOKEN env variable. You should run:
$ export NEPTUNE_API_TOKEN=your_neptune_api_token"""
assert os.getenv('CONFIG_PATH'), """You must specify path to the config file in \
CONFIG_PATH env variable. For example run:
$ export CONFIG_PATH=neptune.yaml"""
def init_logger():
logger = logging.getLogger('salt-detection')
logger.setLevel(logging.INFO)
message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
datefmt='%Y-%m-%d %H-%M-%S')
# console handler for validation info
ch_va = logging.StreamHandler(sys.stdout)
ch_va.setLevel(logging.INFO)
ch_va.setFormatter(fmt=message_format)
# add the handlers to the logger
logger.addHandler(ch_va)
return logger
def get_logger():
return logging.getLogger('salt-detection')
def create_submission(meta, predictions):
output = []
for image_id, mask in zip(meta['id'].values, predictions):
rle_encoded = ' '.join(str(rle) for rle in run_length_encoding(mask))
output.append([image_id, rle_encoded])
submission = pd.DataFrame(output, columns=['id', 'rle_mask']).astype(str)
return submission
def encode_rle(predictions):
return [run_length_encoding(mask) for mask in predictions]
def read_masks(masks_filepaths):
masks = []
for mask_filepath in tqdm(masks_filepaths):
mask = Image.open(mask_filepath)
mask = np.asarray(mask.convert('L').point(lambda x: 0 if x < 128 else 1)).astype(np.uint8)
masks.append(mask)
return masks
def read_images(filepaths):
images = []
for filepath in filepaths:
image = np.array(Image.open(filepath))
images.append(image)
return images
def run_length_encoding(x):
# https://www.kaggle.com/c/data-science-bowl-2018/discussion/48561#
bs = np.where(x.T.flatten())[0]
rle = []
prev = -2
for b in bs:
if (b > prev + 1):
rle.extend((b + 1, 0))
rle[-1] += 1
prev = b
return rle
def run_length_decoding(mask_rle, shape):
"""
Based on https://www.kaggle.com/msl23518/visualize-the-stage1-test-solution and modified
Args:
mask_rle: run-length as string formatted (start length)
shape: (height, width) of array to return
Returns:
numpy array, 1 - mask, 0 - background
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[1] * shape[0], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape((shape[1], shape[0])).T
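# Round-trip sketch (illustrative): encode a small binary mask and decode it back.
#
#   mask = np.array([[0, 1], [1, 1]], dtype=np.uint8)               # (height, width)
#   rle = run_length_encoding(mask)                                 # column-major, 1-based: [2, 3]
#   decoded = run_length_decoding(' '.join(map(str, rle)), shape=mask.shape)
#   assert (decoded == mask).all()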
def generate_metadata(train_images_dir, test_images_dir, depths_filepath):
depths = pd.read_csv(depths_filepath)
metadata = {}
for filename in tqdm(os.listdir(os.path.join(train_images_dir, 'images'))):
image_filepath = os.path.join(train_images_dir, 'images', filename)
mask_filepath = os.path.join(train_images_dir, 'masks', filename)
image_id = filename.split('.')[0]
depth = depths[depths['id'] == image_id]['z'].values[0]
size = (np.array(Image.open(mask_filepath)) > 0).astype(np.uint8).sum()
is_not_empty = int(size != 0)
metadata.setdefault('file_path_image', []).append(image_filepath)
metadata.setdefault('file_path_mask', []).append(mask_filepath)
metadata.setdefault('is_train', []).append(1)
metadata.setdefault('id', []).append(image_id)
metadata.setdefault('z', []).append(depth)
metadata.setdefault('size', []).append(size)
metadata.setdefault('is_not_empty', []).append(is_not_empty)
for filename in tqdm(os.listdir(os.path.join(test_images_dir, 'images'))):
image_filepath = os.path.join(test_images_dir, 'images', filename)
image_id = filename.split('.')[0]
depth = depths[depths['id'] == image_id]['z'].values[0]
size = np.nan
is_not_empty = np.nan
metadata.setdefault('file_path_image', []).append(image_filepath)
metadata.setdefault('file_path_mask', []).append(None)
metadata.setdefault('is_train', []).append(0)
metadata.setdefault('id', []).append(image_id)
metadata.setdefault('z', []).append(depth)
metadata.setdefault('size', []).append(size)
metadata.setdefault('is_not_empty', []).append(is_not_empty)
return pd.DataFrame(metadata)
import datetime as dt
import os
import pickle
from typing import Dict, List
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import activations
from dl_portfolio.logger import LOGGER
from dl_portfolio.data import get_features
from dl_portfolio.pca_ae import build_model
from dl_portfolio.regularizers import WeightsOrthogonality
from dl_portfolio.regressors.nonnegative_linear.ridge import NonnegativeRidge
from dl_portfolio.regressors.nonnegative_linear.base import NonnegativeLinear
from dl_portfolio.constant import BASE_FACTOR_ORDER_DATASET2, BASE_FACTOR_ORDER_DATASET1
from sklearn.linear_model import LinearRegression, Lasso
LOG_BASE_DIR = './dl_portfolio/log'
def build_linear_model(ae_config, reg_type: str, **kwargs):
if reg_type == 'nn_ridge':
if ae_config.l_name == 'l2':
alpha = kwargs.get('alpha', ae_config.l)
kwargs['alpha'] = alpha
else:
alpha = kwargs.get('alpha')
assert alpha is not None
model = NonnegativeRidge(**kwargs)
elif reg_type == 'nn_ls_custom':
model = NonnegativeLinear()
elif reg_type == 'nn_ls':
model = LinearRegression(positive=True, fit_intercept=False, **kwargs)
elif reg_type == 'nn_lasso':
if ae_config.l_name == 'l1':
alpha = kwargs.get('alpha', ae_config.l)
kwargs['alpha'] = alpha
else:
alpha = kwargs.get('alpha')
assert alpha is not None
model = Lasso(positive=True, fit_intercept=False, **kwargs)
else:
raise NotImplementedError(reg_type)
return model
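# Illustrative sketch (assumes `ae_config` is the experiment config object used throughout;
# for reg_type='nn_ls' only **kwargs are forwarded to sklearn's LinearRegression):
#
#   rng = np.random.default_rng(0)
#   X = rng.normal(size=(100, 5))
#   F = np.clip(X @ rng.uniform(size=(5, 2)), 0.0, None)     # toy non-negative "factors"
#   reg = build_linear_model(ae_config, 'nn_ls')             # non-negative least squares, no intercept
#   reg.fit(X - X.mean(0), F)
#   weights = reg.coef_                                      # shape (2, 5), all entries >= 0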
def fit_nnls_one_cv(cv: int, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str,
ae_config, reg_type: str = 'nn_ridge', **kwargs):
model, scaler, dates, test_data, test_features, prediction, embedding, decoding = load_result(ae_config,
test_set,
data,
assets,
base_dir,
cv)
prediction -= scaler['attributes']['mean_']
prediction /= np.sqrt(scaler['attributes']['var_'])
mse_or = np.mean((test_data - prediction) ** 2, 0)
relu_activation_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
relu_activation = relu_activation_layer.predict(test_data)
relu_activation = pd.DataFrame(relu_activation, index=prediction.index)
# Fit linear encoder to the factors
# input_dim = model.layers[0].input_shape[0][-1]
# encoding_dim = model.layers[1].output_shape[-1]
# vlin_encoder = create_linear_encoder_with_constraint(input_dim, encoding_dim)
# lin_encoder.fit(test_data_i, relu_activation_i, batch_size = 1, epochs=500, verbose=2,
# max_queue_size=20, workers=2*os.cpu_count()-1, use_multiprocessing=True)
# factors_nnls_i = lin_encoder.predict(test_data_i)
# lin_embedding = pd.DataFrame(encoder.layers[1].weights[0].numpy(), index=embed.index)
# # Fit non-negative linear least square to the factor
reg_nnls = build_linear_model(ae_config, reg_type, **kwargs)
x = test_data.copy()
mean_ = np.mean(x, 0)
# Center the data as we do not fit intercept
x = x - mean_
reg_nnls.fit(x, relu_activation)
# Now compute intercept: it is just the mean of the dependent variable
intercept_ = np.mean(relu_activation).values
factors_nnls = reg_nnls.predict(x) + intercept_
factors_nnls = pd.DataFrame(factors_nnls, index=prediction.index)
# Get reconstruction error based on nnls embedding
if ae_config.model_type == "pca_ae_model":
# For PCA AE model encoder and decoder share weights
weights = reg_nnls.coef_.copy()
# Compute bias (reconstruction intercept)
bias = mean_ - np.dot(np.mean(factors_nnls, 0), weights)
elif ae_config.model_type == "ae_model":
weights = model.get_layer('decoder').get_weights()[0]
bias = model.get_layer('decoder').get_weights()[1]
else:
raise NotImplementedError(ae_config.model_type)
# Reconstruction
pred_nnls_model = np.dot(factors_nnls, weights) + bias
mse_nnls_model = np.mean((test_data - pred_nnls_model) ** 2, 0)
# pred_nnls_factors = pd.concat([pred_nnls_factors, pd.DataFrame(pred_nnls_factors_i,
# columns=pred.columns,
# index=pred.index)])
pred_nnls_model = pd.DataFrame(pred_nnls_model, columns=prediction.columns, index=prediction.index)
test_data = pd.DataFrame(test_data, columns=prediction.columns, index=prediction.index)
reg_coef = pd.DataFrame(weights.T, index=embedding.index)
return test_data, embedding, decoding, reg_coef, relu_activation, factors_nnls, prediction, pred_nnls_model, mse_or, mse_nnls_model
def get_nnls_analysis(test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, ae_config,
reg_type: str = 'nn_ridge', **kwargs):
"""
:param test_set:
:param data:
:param assets:
:param base_dir:
:param ae_config:
:param reg_type: regression type to fit "nn_ridge" for non negative Ridge or "nn_ls" for non negative LS
:return:
"""
test_data = pd.DataFrame()
prediction = pd.DataFrame()
# pred_nnls_factors = pd.DataFrame()
pred_nnls_model = pd.DataFrame()
factors_nnls = pd.DataFrame()
relu_activation = pd.DataFrame()
embedding = {}
decoding = {}
reg_coef = {}
mse = {
'original': [],
'nnls_factors': [],
'nnls_model': []
}
# cv = 0
for cv in ae_config.data_specs:
LOGGER.info(f'CV: {cv}')
test_data_i, embedding_i, decoding_i, reg_coef_i, relu_activation_i, factors_nnls_i, pred, pred_nnls_model_i, mse_or, mse_nnls_model = fit_nnls_one_cv(
cv,
test_set,
data,
assets,
base_dir,
ae_config,
reg_type=reg_type,
**kwargs)
embedding[cv] = embedding_i
decoding[cv] = decoding_i
reg_coef[cv] = reg_coef_i
relu_activation = pd.concat([relu_activation, relu_activation_i])
factors_nnls = pd.concat([factors_nnls, factors_nnls_i])
prediction = pd.concat([prediction, pred])
pred_nnls_model = pd.concat([pred_nnls_model, pred_nnls_model_i])
test_data = pd.concat([test_data, test_data_i])
mse['original'].append(mse_or)
mse['nnls_model'].append(mse_nnls_model)
results = {
'test_data': test_data,
'prediction': prediction,
# 'pred_nnls_factors': pred_nnls_factors,
'pred_nnls_model': pred_nnls_model,
'factors_nnls': factors_nnls,
'relu_activation': relu_activation,
'mse': mse,
'embedding': embedding,
'decoding': decoding,
'reg_coef': reg_coef
}
return results
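# Usage sketch (hypothetical objects, not taken from the original code base):
#
#   results = get_nnls_analysis('test', data, assets, base_dir, ae_config, reg_type='nn_ridge')
#   avg_mse_original = np.mean(results['mse']['original'], axis=0)
#   avg_mse_nnls = np.mean(results['mse']['nnls_model'], axis=0)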
def reorder_columns(data, new_order):
return data.iloc[:, new_order]
def load_result_wrapper(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str,
reorder_features: bool = True, first_cv=None):
test_data = pd.DataFrame()
prediction = pd.DataFrame()
features = pd.DataFrame()
relu_activation = pd.DataFrame()
residuals = pd.DataFrame()
embedding = {}
decoding = {}
cvs = list(config.data_specs.keys())
if first_cv:
cvs = [cv for cv in cvs if cv >= first_cv]
for cv in cvs:
embedding[cv] = {}
model, scaler, dates, t_data, f, pred, embed, decod, relu_act = load_result(config,
test_set,
data,
assets,
base_dir,
cv,
reorder_features)
        t_data = pd.DataFrame(t_data, columns=pred.columns, index=pred.index)
        # Invert the standardization so the test data is back on the original scale
        t_data *= scaler["attributes"]["scale_"]
        t_data += scaler["attributes"]["mean_"]
test_data = pd.concat([test_data, t_data])
prediction = pd.concat([prediction, pred])
features = pd.concat([features, f])
if relu_act is not None:
relu_activation = pd.concat([relu_activation, relu_act])
residuals = pd.concat([residuals, t_data - pred])
embedding[cv] = embed
decoding[cv] = decod
return test_data, prediction, features, residuals, embedding, decoding, relu_activation
def get_linear_encoder(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, cv: str,
reorder_features=True):
"""
:param model_type: 'ae' or 'nmf'
:param test_set:
:param data:
:param assets:
:param base_dir:
:param cv:
:param ae_config:
:return:
"""
model_type = config.model_type
assert model_type in ["pca_ae_model", "ae_model", "convex_nmf", "semi_nmf"]
assert test_set in ["train", "val", "test"]
    with open(f'{base_dir}/{cv}/scaler.p', 'rb') as scaler_file:
        scaler = pickle.load(scaler_file)
input_dim = len(assets)
model, encoder, extra_features = build_model(config.model_type,
input_dim,
config.encoding_dim,
n_features=None,
extra_features_dim=1,
activation=config.activation,
batch_normalization=config.batch_normalization,
kernel_initializer=config.kernel_initializer,
kernel_constraint=config.kernel_constraint,
kernel_regularizer=config.kernel_regularizer,
activity_regularizer=config.activity_regularizer,
loss=config.loss,
uncorrelated_features=config.uncorrelated_features,
weightage=config.weightage)
model.load_weights(f'{base_dir}/{cv}/model.h5')
layer_name = list(filter(lambda x: 'uncorrelated_features_layer' in x, [l.name for l in model.layers]))[0]
encoder = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
dense_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
dense_layer.layers[-1].activation = activations.linear
assert dense_layer.layers[-1].activation == activations.linear
assert encoder.layers[1].activation == activations.linear
data_spec = config.data_specs[cv]
if test_set == 'test':
_, _, test_data, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'val':
_, test_data, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'train':
        # For the first cv, predict on the train data; for the others, use the previous validation data for prediction
test_data, _, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
else:
raise NotImplementedError(test_set)
# Prediction
test_features = encoder.predict(test_data)
lin_activation = dense_layer.predict(test_data)
index = dates[test_set]
test_features = pd.DataFrame(test_features, index=index)
    lin_activation = pd.DataFrame(lin_activation, index=index)
# -*- coding: utf-8 -*-
"""
Create the economic tables required to run the MRIA model.
"""
import numpy as np
import pandas as pd
class io_basic(object):
"""
This is the class object **io_basic** which is used to set up the table.
"""
def __init__(self, name, filepath, list_regions):
"""
        Create the object instance; specify the file path and the regions to include.
Parameters
- *self* - **io_basic** class object
- name - string name for the **io_basic** class
- filepath - string path name to location of IO table
- list_regions - list of regions to include
Output
- *self*.name - string name of the model in the **io_basic** class
        - *self*.file - filepath in the **io_basic** class
        - *self*.regions - list of regions in the **io_basic** class
- *self*.total_regions - Integer of total amount of regions in the **io_basic** class
"""
self.name = name
self.file = filepath
self.regions = list_regions
self.total_regions = len(list_regions)
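    # Usage sketch (the file path and region names below are placeholders):
    #
    #   DATA = io_basic('MRIA_IO', 'path/to/IO_table.xlsx', ['region_A', 'region_B'])
    #   DATA.load_labels()
    #   DATA.load_all_data()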
def load_labels(self):
"""
Load all labels for the **io_basic** class.
Parameters
- *self* - **io_basic** class object
Output
- *self*.FD_labels - labels for Final Demand columns in the **io_basic** class
- *self*.FD_cat - labels for Final Demand categories in the **io_basic** class
- *self*.Exp_labels - labels for Export columns in the **io_basic** class
- *self*.T_labels - region and sector labels for Z-matrix in the **io_basic** class
- *self*.VA_labels - labels for Value Added in the **io_basic** class
- *self*.sectors - labels for the sectors in the **io_basic** class
"""
if 'xls' in self.file:
FD_labels = pd.read_excel(self.file, sheet_name="labels_FD",
names=['reg', 'tfd'], header=None)
Exp_labels = pd.read_excel(self.file, sheet_name="labels_ExpROW", names=[
'export'], header=None)
T_labels = pd.read_excel(self.file, sheet_name="labels_T",
header=None, names=['reg', 'ind'])
VA_labels = pd.read_excel(self.file, sheet_name="labels_VA", names=[
'Import', 'ValueA'], header=None)
if len(self.regions) == 0:
self.regions = list(T_labels['reg'].unique())
self.total_regions = len(self.regions)
self.FD_labels = FD_labels
self.FD_cat = list(self.FD_labels['tfd'].unique())
self.Exp_labels = Exp_labels
self.T_labels = T_labels
self.VA_labels = VA_labels
self.sectors = list(T_labels['ind'].unique())
def load_all_data(self):
"""
Load all data for the **io_basic** class.
Parameters
- *self* - **io_basic** class object
Output
- *self*.FD_data - pandas Dataframe of Final Demand in the **io_basic** class
- *self*.T_data - pandas Dataframe of Z matrix in the **io_basic** class
- *self*.VA_data - pandas Dataframe of Value Added in the **io_basic** class
- *self*.ImpROW_data - pandas Dataframe of import from the Rest of the World in the **io_basic** class
- *self*.ExpROW_data - pandas Dataframe of exports to the Rest of The World in the **io_basic** class
"""
        # Make sure the labels are loaded before loading the data
        try:
            self.FD_labels
        except AttributeError:
            self.load_labels()
        # Load the data
FD_data = pd.read_excel(self.file, sheet_name="FD", header=None)
T_data = pd.read_excel(self.file, sheet_name="T", header=None)
VA_data = pd.read_excel(self.file, sheet_name="VA", header=None)
ExpROW_data = pd.read_excel(self.file, sheet_name="ExpROW", header=None)
# Add labels to the data from 'load_labels'
FD_data.index = pd.MultiIndex.from_arrays(self.T_labels.values.T)
ExpROW_data.index = pd.MultiIndex.from_arrays(self.T_labels.values.T)
T_data.index = pd.MultiIndex.from_arrays(self.T_labels.values.T)
reg_label = np.array(
list(self.T_labels.values.T[0])+list(self.FD_labels.values.T[0])+['export'])
ind_label = np.array(
list(self.T_labels.values.T[1])+list(self.FD_labels.values.T[1])+['export'])
va_index = np.vstack((reg_label, ind_label))
VA_data.index = pd.MultiIndex.from_arrays(va_index)
FD_data.columns = pd.MultiIndex.from_arrays(self.FD_labels.values.T)
ExpROW_data.columns = pd.MultiIndex.from_arrays(self.Exp_labels.values.T)
        T_data.columns = pd.MultiIndex.from_arrays(self.T_labels.values.T)
import csv
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
from google.cloud.storage.blob import Blob
from google.cloud import storage
import yfinance as yf
import datetime
from datetime import datetime, timedelta
import os
import shutil
import logging
import os.path
import datetime
import tempfile
import base64
import json
from google.cloud import pubsub_v1
from dateutil.relativedelta import relativedelta
from pytz import timezone
def pub_bq_load_msg(file_to_load, bucket, store_path, project, region):
REGION = region
PROJECT_ID = project
RECEIVING_FUNCTION = 'publish'
function_url = f'https://{REGION}-{PROJECT_ID}.cloudfunctions.net/{RECEIVING_FUNCTION}'
if file_to_load == 'micro_cap_etf_lst.csv':
table_name = "top_micro_cap_etf"
else:
table_name = "etf_ytd_daily_summary"
param = {"project":project,"region":region,"topic":"load_etf_dataset","message":{"tgt_dataset":"etf_dataset", "tgt_tbl_name":table_name, "bucket":bucket, "store_path":store_path}}
data=json.dumps(param)
logging.info('topic-message passed:{}'.format(data))
r = requests.post(function_url, json=param)
logging.info('request post header:{} request post status:{}'.format(r.headers, r.status_code))
def load_file_to_storage(bucketname, file_path, store_path, project, region):
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucketname)
data=bucket.blob(store_path)
data.upload_from_filename(file_path)
file_name = os.path.basename(file_path)
logging.info('load_file_to_storage:file to trans={}'.format(file_name))
# pub_bq_tran_msg (file_name, project, region)
pub_bq_load_msg(file_name, bucketname, store_path, project, region)
def get_hist_etf_price(bucket, destdir, file_loc, project, region):
    etf_sym_nm = pd.read_csv(file_loc)
import pandas as pd
import numpy as np
# from pandas.core.tools.datetimes import normalize_date
from pandas._libs import tslib
from backend.robinhood_api import RobinhoodAPI
class RobinhoodData:
"""
Wrapper to download orders and dividends from Robinhood accounts
Downloads two dataframes and saves to datafile
----------
Parameters:
datafile : location of h5 datafile
"""
def __init__(self, datafile):
self.datafile = datafile
def _login(self, user, password):
self.client = RobinhoodAPI()
        # Try to import a local module with stored credentials; fall back to the
        # provided username/password otherwise
        try:
            _temp = __import__('auth')
            self.client.login(_temp.local_user, _temp.local_password)
        except Exception:
            self.client.login(username=user, password=password)
return self
    # private helper to fetch a page of JSON results by URL (used when paginating orders)
def _fetch_json_by_url(self, url):
return self.client.session.get(url).json()
# deleting sensitive or redundant fields
def _delete_sensitive_fields(self, df):
for col in ['account', 'url', 'id', 'instrument']:
if col in df:
del df[col]
return df
# download orders and fields requiring RB client
def _download_orders(self):
print("Downloading orders from Robinhood")
orders = []
past_orders = self.client.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
next_url = past_orders['next']
past_orders = self._fetch_json_by_url(next_url)
orders.extend(past_orders['results'])
df = pd.DataFrame(orders)
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='created_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_ord = self._delete_sensitive_fields(df)
return df_ord
# download dividends and fields requiring RB client
def _download_dividends(self):
print("Downloading dividends from Robinhood")
dividends = self.client.dividends()
dividends = [x for x in dividends['results']]
df = pd.DataFrame(dividends)
if df.shape[0] > 0:
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='paid_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_div = self._delete_sensitive_fields(df)
else:
df_div = pd.DataFrame(columns=['symbol', 'amount', 'position',
'rate', 'paid_at', 'payable_date'])
return df_div
# process orders
def _process_orders(self, df_ord):
# assign to df and reduce the number of fields
df = df_ord.copy()
fields = [
'created_at',
'average_price', 'cumulative_quantity', 'fees',
'symbol', 'side']
df = df[fields]
# convert types
for field in ['average_price', 'cumulative_quantity', 'fees']:
df[field] = pd.to_numeric(df[field])
for field in ['created_at']:
df[field] = pd.to_datetime(df[field])
        # add a date column (timestamps normalized to midnight)
df['date'] = df['created_at'].apply(
lambda x: tslib.normalize_date(x))
# rename columns for consistency
df.rename(columns={
'cumulative_quantity': 'current_size'
}, inplace=True)
# quantity accounting for side of transaction for cumsum later
df['signed_size'] = np.where(
df.side == 'buy',
df['current_size'],
-df['current_size'])
df['signed_size'] = df['signed_size'].astype(np.int64)
return df
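    # Downstream use hinted at above (illustrative only): the cumulative position per
    # symbol can be derived from the signed order sizes, e.g.
    #
    #   df['cum_size'] = df.groupby('symbol')['signed_size'].cumsum()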
    # process dividends
def _process_dividends(self, df_div):
df = df_div.copy()
# convert types
for field in ['amount', 'position', 'rate']:
            df[field] = pd.to_numeric(df[field])
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.errors import ParserWarning
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_list_like
from collections import OrderedDict
from modin.db_conn import ModinDatabaseConnection, UnsupportedDatabaseException
from modin.config import TestDatasetSize, Engine, StorageFormat, IsExperimental
from modin.utils import to_pandas
from modin.pandas.utils import from_arrow
import pyarrow as pa
import os
import sys
import shutil
import sqlalchemy as sa
import csv
import tempfile
from .utils import (
check_file_leaks,
df_equals,
json_short_string,
json_short_bytes,
json_long_string,
json_long_bytes,
get_unique_filename,
io_ops_bad_exc,
eval_io_from_str,
dummy_decorator,
create_test_dfs,
COMP_TO_EXT,
teardown_test_file,
teardown_test_files,
generate_dataframe,
)
if StorageFormat.get() == "Omnisci":
from modin.experimental.core.execution.native.implementations.omnisci_on_native.test.utils import (
eval_io,
align_datetime_dtypes,
)
else:
from .utils import eval_io
if StorageFormat.get() == "Pandas":
import modin.pandas as pd
else:
import modin.experimental.pandas as pd
try:
import ray
EXCEPTIONS = (ray.exceptions.WorkerCrashedError,)
except ImportError:
EXCEPTIONS = ()
from modin.config import NPartitions
NPartitions.put(4)
DATASET_SIZE_DICT = {
"Small": 64,
"Normal": 2000,
"Big": 20000,
}
# Number of rows in the test file
NROWS = DATASET_SIZE_DICT.get(TestDatasetSize.get(), DATASET_SIZE_DICT["Small"])
TEST_DATA = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
def assert_files_eq(path1, path2):
    with open(path1, "rb") as file1, open(path2, "rb") as file2:
        file1_content = file1.read()
        file2_content = file2.read()
    return file1_content == file2_content
def setup_clipboard(row_size=NROWS):
df = pandas.DataFrame({"col1": np.arange(row_size), "col2": np.arange(row_size)})
df.to_clipboard()
def parquet_eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
"""
Helper function to test `to_parquet` method.
Parameters
----------
modin_obj : pd.DataFrame
A Modin DataFrame or a Series to test `to_parquet` method.
pandas_obj: pandas.DataFrame
A pandas DataFrame or a Series to test `to_parquet` method.
fn : str
Name of the method, that should be tested.
extension : str
Extension of the test file.
"""
unique_filename_modin = get_unique_filename(extension=extension)
unique_filename_pandas = get_unique_filename(extension=extension)
try:
getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)
pandas_df = pandas.read_parquet(unique_filename_pandas)
modin_df = pd.read_parquet(unique_filename_modin)
df_equals(pandas_df, modin_df)
finally:
teardown_test_file(unique_filename_pandas)
try:
teardown_test_file(unique_filename_modin)
except IsADirectoryError:
shutil.rmtree(unique_filename_modin)
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
"""Helper function to test `to_<extension>` methods.
Args:
modin_obj: Modin DataFrame or Series to test `to_<extension>` method.
pandas_obj: Pandas DataFrame or Series to test `to_<extension>` method.
fn: name of the method, that should be tested.
extension: Extension of the test file.
"""
unique_filename_modin = get_unique_filename(extension=extension)
unique_filename_pandas = get_unique_filename(extension=extension)
try:
        # The parameter `max_retries=0` is set for the `to_csv` function on the Ray engine,
        # so in order to increase the stability of the tests we repeat the call of
        # the entire function manually
last_exception = None
for _ in range(3):
try:
getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
except EXCEPTIONS as exc:
last_exception = exc
continue
break
else:
raise last_exception
getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)
assert assert_files_eq(unique_filename_modin, unique_filename_pandas)
finally:
teardown_test_files([unique_filename_modin, unique_filename_pandas])
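# Usage sketch for the helper above (mirrors how it is called in the tests below):
#
#   modin_df, pandas_df = create_test_dfs(TEST_DATA)
#   eval_to_file(modin_obj=modin_df, pandas_obj=pandas_df, fn="to_csv", extension="csv")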
@pytest.mark.usefixtures("TestReadCSVFixture")
@pytest.mark.skipif(
IsExperimental.get() and StorageFormat.get() == "Pyarrow",
reason="Segmentation fault; see PR #2347 ffor details",
)
class TestCsv:
# delimiter tests
@pytest.mark.parametrize("sep", [None, "_", ",", ".", "\n"])
@pytest.mark.parametrize("delimiter", ["_", ",", ".", "\n"])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
def test_read_csv_delimiters(
self, make_csv_file, sep, delimiter, decimal, thousands
):
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
delimiter=delimiter,
thousands_separator=thousands,
decimal_separator=decimal,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
delimiter=delimiter,
sep=sep,
decimal=decimal,
thousands=thousands,
)
# Column and Index Locations and Names tests
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize("index_col", [None, "col1"])
@pytest.mark.parametrize("prefix", [None, "_", "col"])
@pytest.mark.parametrize(
"names", [lib.no_default, ["col1"], ["c1", "c2", "c3", "c4", "c5", "c6", "c7"]]
)
@pytest.mark.parametrize(
"usecols", [None, ["col1"], ["col1", "col2", "col6"], [0, 1, 5]]
)
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_col_handling(
self,
header,
index_col,
prefix,
names,
usecols,
skip_blank_lines,
):
if names is lib.no_default:
pytest.skip("some parameters combiantions fails: issue #2312")
if header in ["infer", None] and names is not lib.no_default:
pytest.skip(
"Heterogeneous data in a column is not cast to a common type: issue #3346"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_blank_lines"],
header=header,
index_col=index_col,
prefix=prefix,
names=names,
usecols=usecols,
skip_blank_lines=skip_blank_lines,
)
@pytest.mark.parametrize("usecols", [lambda col_name: col_name in ["a", "b", "e"]])
def test_from_csv_with_callable_usecols(self, usecols):
fname = "modin/pandas/test/data/test_usecols.csv"
pandas_df = pandas.read_csv(fname, usecols=usecols)
modin_df = pd.read_csv(fname, usecols=usecols)
df_equals(modin_df, pandas_df)
# General Parsing Configuration
@pytest.mark.parametrize("dtype", [None, True])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize(
"converters",
[
None,
{
"col1": lambda x: np.int64(x) * 10,
"col2": pandas.to_datetime,
"col4": lambda x: x.replace(":", ";"),
},
],
)
@pytest.mark.parametrize("skipfooter", [0, 10])
def test_read_csv_parsing_1(
self,
dtype,
engine,
converters,
skipfooter,
):
if dtype:
dtype = {
col: "object"
for col in pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], nrows=1
).columns
}
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
check_kwargs_callable=not callable(converters),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
dtype=dtype,
engine=engine,
converters=converters,
skipfooter=skipfooter,
)
@pytest.mark.parametrize("header", ["infer", None, 0])
@pytest.mark.parametrize(
"skiprows",
[
2,
lambda x: x % 2,
lambda x: x > 25,
lambda x: x > 128,
np.arange(10, 50),
np.arange(10, 50, 2),
],
)
@pytest.mark.parametrize("nrows", [35, None])
@pytest.mark.parametrize(
"names",
[
[f"c{col_number}" for col_number in range(4)],
[f"c{col_number}" for col_number in range(6)],
None,
],
)
@pytest.mark.parametrize("encoding", ["latin1", "windows-1251", None])
def test_read_csv_parsing_2(
self,
make_csv_file,
request,
header,
skiprows,
nrows,
names,
encoding,
):
xfail_case = (
StorageFormat.get() == "Omnisci"
and header is not None
and isinstance(skiprows, int)
and names is None
and nrows is None
)
if xfail_case:
pytest.xfail(
"read_csv fails because of duplicated columns names - issue #3080"
)
if request.config.getoption(
"--simulate-cloud"
).lower() != "off" and is_list_like(skiprows):
pytest.xfail(
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340"
)
if encoding:
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
encoding=encoding,
)
kwargs = {
"filepath_or_buffer": unique_filename
if encoding
else pytest.csvs_names["test_read_csv_regular"],
"header": header,
"skiprows": skiprows,
"nrows": nrows,
"names": names,
"encoding": encoding,
}
if Engine.get() != "Python":
df = pandas.read_csv(**dict(kwargs, nrows=1))
# in that case first partition will contain str
if df[df.columns[0]][df.index[0]] in ["c1", "col1", "c3", "col3"]:
pytest.xfail("read_csv incorrect output with float data - issue #2634")
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
**kwargs,
)
@pytest.mark.parametrize("true_values", [["Yes"], ["Yes", "true"], None])
@pytest.mark.parametrize("false_values", [["No"], ["No", "false"], None])
@pytest.mark.parametrize("skipfooter", [0, 10])
@pytest.mark.parametrize("nrows", [35, None])
def test_read_csv_parsing_3(
self,
true_values,
false_values,
skipfooter,
nrows,
):
xfail_case = (
(false_values or true_values)
and Engine.get() != "Python"
and StorageFormat.get() != "Omnisci"
)
if xfail_case:
pytest.xfail("modin and pandas dataframes differs - issue #2446")
eval_io(
fn_name="read_csv",
check_exception_type=None, # issue #2320
raising_exceptions=None,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_yes_no"],
true_values=true_values,
false_values=false_values,
skipfooter=skipfooter,
nrows=nrows,
)
def test_read_csv_skipinitialspace(self):
unique_filename = get_unique_filename()
str_initial_spaces = (
"col1,col2,col3,col4\n"
"five, six, seven, eight\n"
" five, six, seven, eight\n"
"five, six, seven, eight\n"
)
eval_io_from_str(str_initial_spaces, unique_filename, skipinitialspace=True)
@pytest.mark.parametrize(
"test_case",
["single_element", "single_column", "multiple_columns"],
)
def test_read_csv_squeeze(self, request, test_case):
if request.config.getoption("--simulate-cloud").lower() != "off":
pytest.xfail(
reason="Error EOFError: stream has been closed in `modin in the cloud` mode - issue #3329"
)
unique_filename = get_unique_filename()
str_single_element = "1"
str_single_col = "1\n2\n3\n"
str_four_cols = "1, 2, 3, 4\n5, 6, 7, 8\n9, 10, 11, 12\n"
case_to_data = {
"single_element": str_single_element,
"single_column": str_single_col,
"multiple_columns": str_four_cols,
}
eval_io_from_str(case_to_data[test_case], unique_filename, squeeze=True)
eval_io_from_str(
case_to_data[test_case], unique_filename, header=None, squeeze=True
)
def test_read_csv_mangle_dupe_cols(self):
if StorageFormat.get() == "Omnisci":
pytest.xfail(
"processing of duplicated columns in OmniSci storage format is not supported yet - issue #3080"
)
unique_filename = get_unique_filename()
str_non_unique_cols = "col,col,col,col\n5, 6, 7, 8\n9, 10, 11, 12\n"
eval_io_from_str(str_non_unique_cols, unique_filename, mangle_dupe_cols=True)
# NA and Missing Data Handling tests
@pytest.mark.parametrize("na_values", ["custom_nan", "73"])
@pytest.mark.parametrize("keep_default_na", [True, False])
@pytest.mark.parametrize("na_filter", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("skip_blank_lines", [True, False])
def test_read_csv_nans_handling(
self,
na_values,
keep_default_na,
na_filter,
verbose,
skip_blank_lines,
):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_nans"],
na_values=na_values,
keep_default_na=keep_default_na,
na_filter=na_filter,
verbose=verbose,
skip_blank_lines=skip_blank_lines,
)
# Datetime Handling tests
@pytest.mark.parametrize(
"parse_dates", [True, False, ["col2"], ["col2", "col4"], [1, 3]]
)
@pytest.mark.parametrize("infer_datetime_format", [True, False])
@pytest.mark.parametrize("keep_date_col", [True, False])
@pytest.mark.parametrize(
"date_parser", [None, lambda x: pandas.datetime.strptime(x, "%Y-%m-%d")]
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("cache_dates", [True, False])
def test_read_csv_datetime(
self,
parse_dates,
infer_datetime_format,
keep_date_col,
date_parser,
dayfirst,
cache_dates,
):
if (
StorageFormat.get() == "Omnisci"
and isinstance(parse_dates, list)
and ("col4" in parse_dates or 3 in parse_dates)
):
pytest.xfail(
"In some cases read_csv with `parse_dates` with OmniSci storage format outputs incorrect result - issue #3081"
)
raising_exceptions = io_ops_bad_exc # default value
if isinstance(parse_dates, dict) and callable(date_parser):
            # In this case a TypeError is raised: "<lambda>() takes 1 positional argument but 2 were given"
raising_exceptions = list(io_ops_bad_exc)
raising_exceptions.remove(TypeError)
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(date_parser),
raising_exceptions=raising_exceptions,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
parse_dates=parse_dates,
infer_datetime_format=infer_datetime_format,
keep_date_col=keep_date_col,
date_parser=date_parser,
dayfirst=dayfirst,
cache_dates=cache_dates,
)
# Iteration tests
@pytest.mark.parametrize("iterator", [True, False])
def test_read_csv_iteration(self, iterator):
filename = pytest.csvs_names["test_read_csv_regular"]
# Tests __next__ and correctness of reader as an iterator
# Use larger chunksize to read through file quicker
rdf_reader = pd.read_csv(filename, chunksize=500, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=500, iterator=iterator)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_csv(filename, chunksize=1, iterator=iterator)
pd_reader = pandas.read_csv(filename, chunksize=1, iterator=iterator)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
def test_read_csv_encoding_976(self):
file_name = "modin/pandas/test/data/issue_976.csv"
names = [str(i) for i in range(11)]
kwargs = {
"sep": ";",
"names": names,
"encoding": "windows-1251",
}
df1 = pd.read_csv(file_name, **kwargs)
df2 = pandas.read_csv(file_name, **kwargs)
# these columns contain data of various types in partitions
# see #1931 for details;
df1 = df1.drop(["4", "5"], axis=1)
df2 = df2.drop(["4", "5"], axis=1)
df_equals(df1, df2)
# Quoting, Compression parameters tests
@pytest.mark.parametrize("compression", ["infer", "gzip", "bz2", "xz", "zip"])
@pytest.mark.parametrize("encoding", [None, "latin8", "utf16"])
@pytest.mark.parametrize("engine", [None, "python", "c"])
def test_read_csv_compression(self, make_csv_file, compression, encoding, engine):
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename, encoding=encoding, compression=compression
)
compressed_file_path = (
f"{unique_filename}.{COMP_TO_EXT[compression]}"
if compression != "infer"
else unique_filename
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=compressed_file_path,
compression=compression,
encoding=encoding,
engine=engine,
)
@pytest.mark.parametrize(
"encoding",
[
None,
"ISO-8859-1",
"latin1",
"iso-8859-1",
"cp1252",
"utf8",
pytest.param(
"unicode_escape",
marks=pytest.mark.skip(
condition=sys.version_info < (3, 9),
reason="https://bugs.python.org/issue45461",
),
),
"raw_unicode_escape",
"utf_16_le",
"utf_16_be",
"utf32",
"utf_32_le",
"utf_32_be",
"utf-8-sig",
],
)
def test_read_csv_encoding(self, make_csv_file, encoding):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, encoding=encoding)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
encoding=encoding,
)
@pytest.mark.parametrize("thousands", [None, ",", "_", " "])
@pytest.mark.parametrize("decimal", [".", "_"])
@pytest.mark.parametrize("lineterminator", [None, "x", "\n"])
@pytest.mark.parametrize("escapechar", [None, "d", "x"])
@pytest.mark.parametrize("dialect", ["test_csv_dialect", None])
def test_read_csv_file_format(
self,
make_csv_file,
thousands,
decimal,
lineterminator,
escapechar,
dialect,
):
if Engine.get() != "Python" and lineterminator == "x":
pytest.xfail("read_csv with Ray engine outputs empty frame - issue #2493")
elif Engine.get() != "Python" and escapechar:
pytest.xfail(
"read_csv with Ray engine fails with some 'escapechar' parameters - issue #2494"
)
elif Engine.get() != "Python" and dialect:
pytest.xfail(
"read_csv with Ray engine fails with `dialect` parameter - issue #2508"
)
unique_filename = get_unique_filename()
if dialect:
test_csv_dialect_params = {
"delimiter": "_",
"doublequote": False,
"escapechar": "\\",
"quotechar": "d",
"quoting": csv.QUOTE_ALL,
}
csv.register_dialect(dialect, **test_csv_dialect_params)
dialect = csv.get_dialect(dialect)
make_csv_file(filename=unique_filename, **test_csv_dialect_params)
else:
make_csv_file(
filename=unique_filename,
thousands_separator=thousands,
decimal_separator=decimal,
escapechar=escapechar,
line_terminator=lineterminator,
)
eval_io(
check_exception_type=None, # issue #2320
raising_exceptions=None,
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
thousands=thousands,
decimal=decimal,
lineterminator=lineterminator,
escapechar=escapechar,
dialect=dialect,
)
@pytest.mark.parametrize(
"quoting",
[csv.QUOTE_ALL, csv.QUOTE_MINIMAL, csv.QUOTE_NONNUMERIC, csv.QUOTE_NONE],
)
@pytest.mark.parametrize("quotechar", ['"', "_", "d"])
@pytest.mark.parametrize("doublequote", [True, False])
@pytest.mark.parametrize("comment", [None, "#", "x"])
def test_read_csv_quoting(
self,
make_csv_file,
quoting,
quotechar,
doublequote,
comment,
):
        # in these cases escapechar should be set, otherwise an error occurs:
        # "_csv.Error: need to escape, but no escapechar set"
use_escapechar = (
not doublequote and quotechar != '"' and quoting != csv.QUOTE_NONE
)
escapechar = "\\" if use_escapechar else None
unique_filename = get_unique_filename()
make_csv_file(
filename=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment_col_char=comment,
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=unique_filename,
quoting=quoting,
quotechar=quotechar,
doublequote=doublequote,
escapechar=escapechar,
comment=comment,
)
# Error Handling parameters tests
@pytest.mark.parametrize("warn_bad_lines", [True, False, None])
@pytest.mark.parametrize("error_bad_lines", [True, False, None])
@pytest.mark.parametrize("on_bad_lines", ["error", "warn", "skip", None])
def test_read_csv_error_handling(
self,
warn_bad_lines,
error_bad_lines,
on_bad_lines,
):
# in that case exceptions are raised both by Modin and pandas
# and tests pass
raise_exception_case = on_bad_lines is not None and (
error_bad_lines is not None or warn_bad_lines is not None
)
if (
not raise_exception_case
and Engine.get() not in ["Python", "Cloudpython"]
and StorageFormat.get() != "Omnisci"
):
pytest.xfail("read_csv doesn't raise `bad lines` exceptions - issue #2500")
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_bad_lines"],
warn_bad_lines=warn_bad_lines,
error_bad_lines=error_bad_lines,
on_bad_lines=on_bad_lines,
)
# Internal parameters tests
@pytest.mark.parametrize("use_str_data", [True, False])
@pytest.mark.parametrize("engine", [None, "python", "c"])
@pytest.mark.parametrize("delimiter", [",", " "])
@pytest.mark.parametrize("delim_whitespace", [True, False])
@pytest.mark.parametrize("low_memory", [True, False])
@pytest.mark.parametrize("memory_map", [True, False])
@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"])
def test_read_csv_internal(
self,
make_csv_file,
use_str_data,
engine,
delimiter,
delim_whitespace,
low_memory,
memory_map,
float_precision,
):
        # In this case a TypeError is raised ("cannot use a string pattern on a bytes-like object"),
        # so TypeError should be excluded from the raising_exceptions list in order to check that
        # the same exceptions are raised by pandas and Modin
case_with_TypeError_exc = (
engine == "python"
and delimiter == ","
and delim_whitespace
and low_memory
and memory_map
and float_precision is None
)
raising_exceptions = io_ops_bad_exc # default value
if case_with_TypeError_exc:
raising_exceptions = list(io_ops_bad_exc)
raising_exceptions.remove(TypeError)
kwargs = {
"engine": engine,
"delimiter": delimiter,
"delim_whitespace": delim_whitespace,
"low_memory": low_memory,
"memory_map": memory_map,
"float_precision": float_precision,
}
unique_filename = get_unique_filename()
if use_str_data:
str_delim_whitespaces = (
"col1 col2 col3 col4\n5 6 7 8\n9 10 11 12\n"
)
eval_io_from_str(
str_delim_whitespaces,
unique_filename,
raising_exceptions=raising_exceptions,
**kwargs,
)
else:
make_csv_file(
filename=unique_filename,
delimiter=delimiter,
)
eval_io(
filepath_or_buffer=unique_filename,
fn_name="read_csv",
raising_exceptions=raising_exceptions,
**kwargs,
)
# Issue related, specific or corner cases
@pytest.mark.parametrize("nrows", [2, None])
def test_read_csv_bad_quotes(self, nrows):
csv_bad_quotes = (
'1, 2, 3, 4\none, two, three, four\nfive, "six", seven, "eight\n'
)
unique_filename = get_unique_filename()
eval_io_from_str(csv_bad_quotes, unique_filename, nrows=nrows)
def test_read_csv_categories(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_categories.csv",
names=["one", "two"],
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("encoding_errors", ["strict", "ignore"])
@pytest.mark.parametrize("parse_dates", [False, ["timestamp"]])
@pytest.mark.parametrize("index_col", [None, 0, 2])
@pytest.mark.parametrize("header", ["infer", 0])
@pytest.mark.parametrize(
"names",
[
None,
["timestamp", "symbol", "high", "low", "open", "close", "spread", "volume"],
],
)
def test_read_csv_parse_dates(
self, names, header, index_col, parse_dates, encoding, encoding_errors
):
if names is not None and header == "infer":
pytest.xfail(
"read_csv with Ray engine works incorrectly with date data and names parameter provided - issue #2509"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_time_parsing.csv",
names=names,
header=header,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
encoding_errors=encoding_errors,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
def test_read_csv_s3(self, storage_options):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="s3://noaa-ghcn-pds/csv/1788.csv",
storage_options=storage_options,
)
@pytest.mark.parametrize("names", [list("XYZ"), None])
@pytest.mark.parametrize("skiprows", [1, 2, 3, 4, None])
def test_read_csv_skiprows_names(self, names, skiprows):
if StorageFormat.get() == "Omnisci" and names is None and skiprows in [1, None]:
# If these conditions are satisfied, columns names will be inferred
# from the first row, that will contain duplicated values, that is
# not supported by `Omnisci` storage format yet.
pytest.xfail(
"processing of duplicated columns in OmniSci storage format is not supported yet - issue #3080"
)
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/issue_2239.csv",
names=names,
skiprows=skiprows,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_default_to_pandas(self):
with pytest.warns(UserWarning):
# This tests that we default to pandas on a buffer
from io import StringIO
pd.read_csv(
StringIO(open(pytest.csvs_names["test_read_csv_regular"], "r").read())
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_default_to_pandas_url(self):
# We haven't implemented read_csv from https, but if it's implemented, then this needs to change
eval_io(
fn_name="read_csv",
modin_warning=UserWarning,
# read_csv kwargs
filepath_or_buffer="https://raw.githubusercontent.com/modin-project/modin/master/modin/pandas/test/data/blah.csv",
# It takes about ~17Gb of RAM for Omnisci to import the whole table from this test
# because of too many (~1000) string columns in it. Taking a subset of columns
# to be able to run this test on low-RAM machines.
usecols=[0, 1, 2, 3] if StorageFormat.get() == "Omnisci" else None,
)
@pytest.mark.parametrize("nrows", [21, 5, None])
@pytest.mark.parametrize("skiprows", [4, 1, 500, None])
def test_read_csv_newlines_in_quotes(self, nrows, skiprows):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/newlines.csv",
nrows=nrows,
skiprows=skiprows,
cast_to_str=StorageFormat.get() != "Omnisci",
)
def test_read_csv_sep_none(self):
eval_io(
fn_name="read_csv",
modin_warning=ParserWarning,
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
sep=None,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_read_csv_incorrect_data(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/test_categories.json",
)
@pytest.mark.parametrize(
"kwargs",
[
{"names": [5, 1, 3, 4, 2, 6]},
{"names": [0]},
{"names": None, "usecols": [1, 0, 2]},
{"names": [3, 1, 2, 5], "usecols": [4, 1, 3, 2]},
],
)
def test_read_csv_names_neq_num_cols(self, kwargs):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer="modin/pandas/test/data/issue_2074.csv",
**kwargs,
)
def test_read_csv_wrong_path(self):
raising_exceptions = [e for e in io_ops_bad_exc if e != FileNotFoundError]
eval_io(
fn_name="read_csv",
raising_exceptions=raising_exceptions,
# read_csv kwargs
filepath_or_buffer="/some/wrong/path.csv",
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.parametrize("header", [False, True])
@pytest.mark.parametrize("mode", ["w", "wb+"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_to_csv(self, header, mode):
pandas_df = generate_dataframe()
modin_df = pd.DataFrame(pandas_df)
eval_to_file(
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_csv",
extension="csv",
header=header,
mode=mode,
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_dataframe_to_csv(self):
pandas_df = pandas.read_csv(pytest.csvs_names["test_read_csv_regular"])
modin_df = pd.DataFrame(pandas_df)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_csv", extension="csv"
)
@pytest.mark.skipif(
StorageFormat.get() == "Omnisci",
reason="to_csv is not implemented with OmniSci storage format yet - issue #3082",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
)
def test_series_to_csv(self):
pandas_s = pandas.read_csv(
pytest.csvs_names["test_read_csv_regular"], usecols=["col1"]
).squeeze()
modin_s = pd.Series(pandas_s)
eval_to_file(
modin_obj=modin_s, pandas_obj=pandas_s, fn="to_csv", extension="csv"
)
def test_read_csv_within_decorator(self):
@dummy_decorator()
def wrapped_read_csv(file, method):
if method == "pandas":
return pandas.read_csv(file)
if method == "modin":
return pd.read_csv(file)
pandas_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="pandas"
)
modin_df = wrapped_read_csv(
pytest.csvs_names["test_read_csv_regular"], method="modin"
)
if StorageFormat.get() == "Omnisci":
# Aligning DateTime dtypes because of the bug related to the `parse_dates` parameter:
# https://github.com/modin-project/modin/issues/3485
modin_df, pandas_df = align_datetime_dtypes(modin_df, pandas_df)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_csv_file_handle(self, read_mode, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename)
with open(unique_filename, mode=read_mode) as buffer:
df_pandas = pandas.read_csv(buffer)
buffer.seek(0)
df_modin = pd.read_csv(buffer)
df_equals(df_modin, df_pandas)
def test_unnamed_index(self):
def get_internal_df(df):
            partition = df._query_compiler._modin_frame._partitions[0][0]
return partition.to_pandas()
path = "modin/pandas/test/data/issue_3119.csv"
read_df = pd.read_csv(path, index_col=0)
assert get_internal_df(read_df).index.name is None
read_df = pd.read_csv(path, index_col=[0, 1])
for name1, name2 in zip(get_internal_df(read_df).index.names, [None, "a"]):
assert name1 == name2
def test_read_csv_empty_frame(self):
eval_io(
fn_name="read_csv",
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
usecols=["col1"],
index_col="col1",
)
@pytest.mark.parametrize(
"skiprows",
[
lambda x: x > 20,
lambda x: True,
lambda x: x in [10, 20],
pytest.param(
lambda x: x << 10,
marks=pytest.mark.skipif(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #2340",
),
),
],
)
def test_read_csv_skiprows_corner_cases(self, skiprows):
eval_io(
fn_name="read_csv",
check_kwargs_callable=not callable(skiprows),
# read_csv kwargs
filepath_or_buffer=pytest.csvs_names["test_read_csv_regular"],
skiprows=skiprows,
)
class TestTable:
def test_read_table(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
eval_io(
fn_name="read_table",
# read_table kwargs
filepath_or_buffer=unique_filename,
)
def test_read_table_within_decorator(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
@dummy_decorator()
def wrapped_read_table(file, method):
if method == "pandas":
return pandas.read_table(file)
if method == "modin":
return pd.read_table(file)
pandas_df = wrapped_read_table(unique_filename, method="pandas")
modin_df = wrapped_read_table(unique_filename, method="modin")
df_equals(modin_df, pandas_df)
def test_read_table_empty_frame(self, make_csv_file):
unique_filename = get_unique_filename()
make_csv_file(filename=unique_filename, delimiter="\t")
eval_io(
fn_name="read_table",
# read_table kwargs
filepath_or_buffer=unique_filename,
usecols=["col1"],
index_col="col1",
)
class TestParquet:
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension="parquet")
make_parquet_file(filename=unique_filename)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
    def test_read_parquet_directory(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension=None)
make_parquet_file(filename=unique_filename, directory=True)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_partitioned_directory(self, make_parquet_file, columns):
unique_filename = get_unique_filename(extension=None)
make_parquet_file(filename=unique_filename, partitioned_columns=["col1"])
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path=unique_filename,
columns=columns,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_pandas_index(self):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
unique_filename = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.set_index("idx").to_parquet(unique_filename)
# read the same parquet using modin.pandas
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
pandas_df.set_index(["idx", "A"]).to_parquet(unique_filename)
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
finally:
os.remove(unique_filename)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_pandas_index_partitioned(self):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
unique_filename = get_unique_filename(extension="parquet")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.set_index("idx").to_parquet(unique_filename, partition_cols=["A"])
# read the same parquet using modin.pandas
df_equals(
pd.read_parquet(unique_filename), pandas.read_parquet(unique_filename)
)
finally:
shutil.rmtree(unique_filename)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_hdfs(self):
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path="modin/pandas/test/data/hdfs.parquet",
)
@pytest.mark.parametrize("path_type", ["url", "object"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_s3(self, path_type):
dataset_url = "s3://modin-datasets/testing/test_data.parquet"
if path_type == "object":
import s3fs
fs = s3fs.S3FileSystem(anon=True)
with fs.open(dataset_url, "rb") as file_obj:
eval_io("read_parquet", path=file_obj)
else:
eval_io("read_parquet", path=dataset_url, storage_options={"anon": True})
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_without_metadata(self):
"""Test that Modin can read parquet files not written by pandas."""
from pyarrow import csv
from pyarrow import parquet
parquet_fname = get_unique_filename(extension="parquet")
        csv_fname = get_unique_filename(extension="csv")
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
try:
pandas_df.to_csv(csv_fname, index=False)
# read into pyarrow table and write it to a parquet file
t = csv.read_csv(csv_fname)
parquet.write_table(t, parquet_fname)
df_equals(
pd.read_parquet(parquet_fname), pandas.read_parquet(parquet_fname)
)
finally:
teardown_test_files([parquet_fname, csv_fname])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_to_parquet(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
parquet_eval_to_file(
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_parquet",
extension="parquet",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_parquet_2462(self):
test_df = pandas.DataFrame({"col1": [["ad_1", "ad_2"], ["ad_3"]]})
with tempfile.TemporaryDirectory() as directory:
path = f"{directory}/data"
os.makedirs(path)
test_df.to_parquet(path + "/part-00000.parquet")
read_df = pd.read_parquet(path)
df_equals(test_df, read_df)
class TestJson:
@pytest.mark.parametrize("lines", [False, True])
def test_read_json(self, make_json_file, lines):
eval_io(
fn_name="read_json",
# read_json kwargs
path_or_buf=make_json_file(lines=lines),
lines=lines,
)
@pytest.mark.parametrize(
"storage_options",
[{"anon": False}, {"anon": True}, {"key": "123", "secret": "123"}, None],
)
def test_read_json_s3(self, storage_options):
eval_io(
fn_name="read_json",
path_or_buf="s3://modin-datasets/testing/test_data.json",
lines=True,
orient="records",
storage_options=storage_options,
)
def test_read_json_categories(self):
eval_io(
fn_name="read_json",
# read_json kwargs
path_or_buf="modin/pandas/test/data/test_categories.json",
dtype={"one": "int64", "two": "category"},
)
@pytest.mark.parametrize(
"data",
[json_short_string, json_short_bytes, json_long_string, json_long_bytes],
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_json_string_bytes(self, data):
with pytest.warns(UserWarning):
modin_df = pd.read_json(data)
# For I/O objects we need to rewind to reuse the same object.
if hasattr(data, "seek"):
data.seek(0)
df_equals(modin_df, pandas.read_json(data))
def test_to_json(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_json", extension="json"
)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_json_file_handle(self, make_json_file, read_mode):
with open(make_json_file(), mode=read_mode) as buf:
df_pandas = pandas.read_json(buf)
buf.seek(0)
df_modin = pd.read_json(buf)
df_equals(df_pandas, df_modin)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_json_metadata(self, make_json_file):
# `lines=True` is for triggering Modin implementation,
# `orient="records"` should be set if `lines=True`
df = pd.read_json(
make_json_file(ncols=80, lines=True), lines=True, orient="records"
)
parts_width_cached = df._query_compiler._modin_frame._column_widths_cache
num_splits = len(df._query_compiler._modin_frame._partitions[0])
parts_width_actual = [
len(df._query_compiler._modin_frame._partitions[0][i].get().columns)
for i in range(num_splits)
]
assert parts_width_cached == parts_width_actual
class TestExcel:
@check_file_leaks
def test_read_excel(self, make_excel_file):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=make_excel_file(),
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_engine(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
engine="openpyxl",
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_index_col(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
index_col=0,
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_all_sheets(self, make_excel_file):
unique_filename = make_excel_file()
pandas_df = pandas.read_excel(unique_filename, sheet_name=None)
modin_df = pd.read_excel(unique_filename, sheet_name=None)
assert isinstance(pandas_df, (OrderedDict, dict))
assert isinstance(modin_df, type(pandas_df))
assert pandas_df.keys() == modin_df.keys()
for key in pandas_df.keys():
df_equals(modin_df.get(key), pandas_df.get(key))
@pytest.mark.xfail(
Engine.get() != "Python",
reason="pandas throws the exception. See pandas issue #39250 for more info",
)
@check_file_leaks
def test_read_excel_sheetname_title(self):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io="modin/pandas/test/data/excel_sheetname_title.xlsx",
)
@check_file_leaks
def test_excel_empty_line(self):
path = "modin/pandas/test/data/test_emptyline.xlsx"
modin_df = pd.read_excel(path)
assert str(modin_df)
@pytest.mark.parametrize(
"sheet_name",
[
"Sheet1",
"AnotherSpecialName",
"SpecialName",
"SecondSpecialName",
0,
1,
2,
3,
],
)
@check_file_leaks
def test_read_excel_sheet_name(self, sheet_name):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io="modin/pandas/test/data/modin_error_book.xlsx",
sheet_name=sheet_name,
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="TypeError: Expected list, got type - issue #3284",
)
def test_ExcelFile(self, make_excel_file):
unique_filename = make_excel_file()
modin_excel_file = pd.ExcelFile(unique_filename)
pandas_excel_file = pandas.ExcelFile(unique_filename)
try:
df_equals(modin_excel_file.parse(), pandas_excel_file.parse())
assert modin_excel_file.io == unique_filename
assert isinstance(modin_excel_file, pd.ExcelFile)
finally:
modin_excel_file.close()
pandas_excel_file.close()
@pytest.mark.xfail(strict=False, reason="Flaky test, defaults to pandas")
def test_to_excel(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
unique_filename_modin = get_unique_filename(extension="xlsx")
unique_filename_pandas = get_unique_filename(extension="xlsx")
modin_writer = pandas.ExcelWriter(unique_filename_modin)
pandas_writer = pandas.ExcelWriter(unique_filename_pandas)
try:
modin_df.to_excel(modin_writer)
pandas_df.to_excel(pandas_writer)
modin_writer.save()
pandas_writer.save()
assert assert_files_eq(unique_filename_modin, unique_filename_pandas)
finally:
teardown_test_files([unique_filename_modin, unique_filename_pandas])
@pytest.mark.xfail(
Engine.get() != "Python", reason="Test fails because of issue 3305"
)
@check_file_leaks
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_excel_empty_frame(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=UserWarning,
# read_excel kwargs
io=make_excel_file(),
usecols=[0],
index_col=0,
)
class TestHdf:
@pytest.mark.parametrize("format", [None, "table"])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_hdf(self, make_hdf_file, format):
eval_io(
fn_name="read_hdf",
# read_hdf kwargs
path_or_buf=make_hdf_file(format=format),
key="df",
)
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_HDFStore(self):
hdf_file = None
unique_filename_modin = get_unique_filename(extension="hdf")
unique_filename_pandas = get_unique_filename(extension="hdf")
try:
modin_store = pd.HDFStore(unique_filename_modin)
pandas_store = pandas.HDFStore(unique_filename_pandas)
modin_df, pandas_df = create_test_dfs(TEST_DATA)
modin_store["foo"] = modin_df
pandas_store["foo"] = pandas_df
modin_df = modin_store.get("foo")
pandas_df = pandas_store.get("foo")
df_equals(modin_df, pandas_df)
modin_store.close()
pandas_store.close()
modin_df = pandas.read_hdf(unique_filename_modin, key="foo", mode="r")
pandas_df = pandas.read_hdf(unique_filename_pandas, key="foo", mode="r")
df_equals(modin_df, pandas_df)
assert isinstance(modin_store, pd.HDFStore)
handle, hdf_file = tempfile.mkstemp(suffix=".hdf5", prefix="test_read")
os.close(handle)
with pd.HDFStore(hdf_file, mode="w") as store:
store.append("data/df1", pd.DataFrame(np.random.randn(5, 5)))
store.append("data/df2", pd.DataFrame(np.random.randn(4, 4)))
modin_df = pd.read_hdf(hdf_file, key="data/df1", mode="r")
pandas_df = pandas.read_hdf(hdf_file, key="data/df1", mode="r")
df_equals(modin_df, pandas_df)
finally:
if hdf_file:
os.unlink(hdf_file)
teardown_test_files([unique_filename_modin, unique_filename_pandas])
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_HDFStore_in_read_hdf(self):
filename = get_unique_filename(extension="hdf")
dfin = pd.DataFrame(np.random.rand(8, 8))
try:
dfin.to_hdf(filename, "/key")
with pd.HDFStore(filename) as h:
modin_df = pd.read_hdf(h, "/key")
with pandas.HDFStore(filename) as h:
pandas_df = pandas.read_hdf(h, "/key")
df_equals(modin_df, pandas_df)
finally:
teardown_test_files([filename])
class TestSql:
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_sql(self, make_sql_connection):
filename = get_unique_filename(extension="db")
table = "test_read_sql"
conn = make_sql_connection(filename, table)
query = f"select * from {table}"
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=conn,
)
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=conn,
index_col="index",
)
with pytest.warns(UserWarning):
pd.read_sql_query(query, conn)
with pytest.warns(UserWarning):
pd.read_sql_table(table, conn)
# Test SQLAlchemy engine
sqlalchemy_engine = sa.create_engine(conn)
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=sqlalchemy_engine,
)
# Test SQLAlchemy Connection
sqlalchemy_connection = sqlalchemy_engine.connect()
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=sqlalchemy_connection,
)
modin_df = pd.read_sql(
sql=query, con=ModinDatabaseConnection("sqlalchemy", conn)
)
pandas_df = pandas.read_sql(sql=query, con=sqlalchemy_connection)
df_equals(modin_df, pandas_df)
with pytest.raises(UnsupportedDatabaseException):
ModinDatabaseConnection("unsupported_database")
@pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="The reason of tests fail in `cloud` mode is unknown for now - issue #3264",
)
def test_read_sql_with_chunksize(self, make_sql_connection):
filename = get_unique_filename(extension="db")
table = "test_read_sql_with_chunksize"
conn = make_sql_connection(filename, table)
query = f"select * from {table}"
pandas_gen = pandas.read_sql(query, conn, chunksize=10)
modin_gen = pd.read_sql(query, conn, chunksize=10)
for modin_df, pandas_df in zip(modin_gen, pandas_gen):
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("index", [False, True])
def test_to_sql(self, make_sql_connection, index):
table_name = f"test_to_sql_{str(index)}"
modin_df, pandas_df = create_test_dfs(TEST_DATA)
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection(f"{table_name}_modin.db")
modin_df.to_sql(table_name, conn, index=index)
df_modin_sql = pandas.read_sql(
table_name, con=conn, index_col="index" if index else None
)
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection(f"{table_name}_pandas.db")
pandas_df.to_sql(table_name, conn, index=index)
df_pandas_sql = pandas.read_sql(
table_name, con=conn, index_col="index" if index else None
)
assert df_modin_sql.sort_index().equals(df_pandas_sql.sort_index())
class TestHtml:
@pytest.mark.xfail(reason="read_html is not yet implemented properly - issue #1296")
def test_read_html(self, make_html_file):
eval_io(fn_name="read_html", io=make_html_file())
def test_to_html(self):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
eval_to_file(
modin_obj=modin_df, pandas_obj=pandas_df, fn="to_html", extension="html"
)
class TestFwf:
def test_fwf_file(self, make_fwf_file):
fwf_data = (
"id8141 360.242940 149.910199 11950.7\n"
"id1594 444.953632 166.985655 11788.4\n"
"id1849 364.136849 183.628767 11806.2\n"
"id1230 413.836124 184.375703 11916.8\n"
"id1948 502.953953 173.237159 12468.3\n"
)
unique_filename = make_fwf_file(fwf_data=fwf_data)
colspecs = [(0, 6), (8, 20), (21, 33), (34, 43)]
df = pd.read_fwf(unique_filename, colspecs=colspecs, header=None, index_col=0)
assert isinstance(df, pd.DataFrame)
@pytest.mark.parametrize(
"kwargs",
[
{
"colspecs": [
(0, 11),
(11, 15),
(19, 24),
(27, 32),
(35, 40),
(43, 48),
(51, 56),
(59, 64),
(67, 72),
(75, 80),
(83, 88),
(91, 96),
(99, 104),
(107, 112),
],
"names": ["stationID", "year", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"na_values": ["-9999"],
"index_col": ["stationID", "year"],
},
{
"widths": [20, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8],
"names": ["id", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
"index_col": [0],
},
],
)
def test_fwf_file_colspecs_widths(self, make_fwf_file, kwargs):
unique_filename = make_fwf_file()
modin_df = pd.read_fwf(unique_filename, **kwargs)
        pandas_df = pandas.read_fwf(unique_filename, **kwargs)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("usecols", [["a"], ["a", "b", "d"], [0, 1, 3]])
def test_fwf_file_usecols(self, make_fwf_file, usecols):
fwf_data = (
"a b c d\n"
"id8141 360.242940 149.910199 11950.7\n"
"id1594 444.953632 166.985655 11788.4\n"
"id1849 364.136849 183.628767 11806.2\n"
"id1230 413.836124 184.375703 11916.8\n"
"id1948 502.953953 173.237159 12468.3\n"
)
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=make_fwf_file(fwf_data=fwf_data),
usecols=usecols,
)
def test_fwf_file_chunksize(self, make_fwf_file):
unique_filename = make_fwf_file()
# Tests __next__ and correctness of reader as an iterator
rdf_reader = pd.read_fwf(unique_filename, chunksize=5)
pd_reader = pandas.read_fwf(unique_filename, chunksize=5)
for modin_df, pd_df in zip(rdf_reader, pd_reader):
df_equals(modin_df, pd_df)
# Tests that get_chunk works correctly
rdf_reader = pd.read_fwf(unique_filename, chunksize=1)
pd_reader = pandas.read_fwf(unique_filename, chunksize=1)
modin_df = rdf_reader.get_chunk(1)
pd_df = pd_reader.get_chunk(1)
df_equals(modin_df, pd_df)
# Tests that read works correctly
rdf_reader = pd.read_fwf(unique_filename, chunksize=1)
pd_reader = pandas.read_fwf(unique_filename, chunksize=1)
modin_df = rdf_reader.read()
pd_df = pd_reader.read()
df_equals(modin_df, pd_df)
@pytest.mark.parametrize(
"nrows",
[
pytest.param(
13,
marks=pytest.mark.xfail(
Engine.get() == "Ray",
reason="read_fwf bug on pandas side: pandas-dev/pandas#44021",
),
),
None,
],
)
def test_fwf_file_skiprows(self, make_fwf_file, nrows):
unique_filename = make_fwf_file()
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=unique_filename,
skiprows=2,
nrows=nrows,
)
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=unique_filename,
usecols=[0, 4, 7],
skiprows=[2, 5],
nrows=nrows,
)
def test_fwf_file_index_col(self, make_fwf_file):
fwf_data = (
"a b c d\n"
"id8141 360.242940 149.910199 11950.7\n"
"id1594 444.953632 166.985655 11788.4\n"
"id1849 364.136849 183.628767 11806.2\n"
"id1230 413.836124 184.375703 11916.8\n"
"id1948 502.953953 173.237159 12468.3\n"
)
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=make_fwf_file(fwf_data=fwf_data),
index_col="c",
)
def test_fwf_file_skipfooter(self, make_fwf_file):
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=make_fwf_file(),
skipfooter=2,
)
def test_fwf_file_parse_dates(self, make_fwf_file):
dates = pandas.date_range("2000", freq="h", periods=10)
fwf_data = "col1 col2 col3 col4"
for i in range(10, 20):
fwf_data = fwf_data + "\n{col1} {col2} {col3} {col4}".format(
col1=str(i),
col2=str(dates[i - 10].date()),
col3=str(i),
col4=str(dates[i - 10].time()),
)
unique_filename = make_fwf_file(fwf_data=fwf_data)
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=unique_filename,
parse_dates=[["col2", "col4"]],
)
eval_io(
fn_name="read_fwf",
# read_fwf kwargs
filepath_or_buffer=unique_filename,
parse_dates={"time": ["col2", "col4"]},
)
@pytest.mark.parametrize(
"read_mode",
[
"r",
pytest.param(
"rb",
marks=pytest.mark.xfail(
condition="config.getoption('--simulate-cloud').lower() != 'off'",
reason="Cannot pickle file handles. See comments in PR #2625",
),
),
],
)
def test_read_fwf_file_handle(self, make_fwf_file, read_mode):
with open(make_fwf_file(), mode=read_mode) as buffer:
df_pandas = pandas.read_fwf(buffer)
buffer.seek(0)
df_modin = pd.read_fwf(buffer)
df_equals(df_modin, df_pandas)
def test_read_fwf_empty_frame(self, make_fwf_file):
kwargs = {
"usecols": [0],
"index_col": 0,
}
unique_filename = make_fwf_file()
modin_df = pd.read_fwf(unique_filename, **kwargs)
        pandas_df = pandas.read_fwf(unique_filename, **kwargs)
        df_equals(modin_df, pandas_df)
"""
Functions to check data is loaded correctly
"""
import pandas as pd
from flowsa.flowbyfunctions import fba_fill_na_dict, harmonize_units, fba_activity_fields, filter_by_geoscale, \
fba_default_grouping_fields, fbs_default_grouping_fields, aggregator, sector_aggregation, fbs_fill_na_dict, \
fbs_activity_fields, clean_df, create_geoscale_list, sector_disaggregation, replace_strings_with_NoneType, \
replace_NoneType_with_empty_cells
from flowsa.common import US_FIPS, sector_level_key, flow_by_sector_fields, load_sector_length_crosswalk, \
load_sector_crosswalk, sector_source_name, log, fips_number_key, outputpath, activity_fields
def check_flow_by_fields(flowby_df, flowbyfields):
"""
    Compare the data types of the flowby df fields against the standard field definitions
:param flowby_df: Either flowbyactivity or flowbysector df
:param flowbyfields: Either flow_by_activity_fields or flow_by_sector_fields
:return:
"""
for k, v in flowbyfields.items():
try:
log.debug("fba activity " + k + " data type is " + str(flowby_df[k].values.dtype))
log.debug("standard " + k + " data type is " + str(v[0]['dtype']))
        except KeyError:
            log.debug("Failed to find field " + k + " in fba")
def check_if_activities_match_sectors(fba):
"""
Checks if activities in flowbyactivity that appear to be like sectors are actually sectors
:param fba: a flow by activity dataset
    :return: A list of activities not matching the default sector list or text indicating 100% match
"""
# Get list of activities in a flowbyactivity file
activities = []
for f in fba_activity_fields:
activities.extend(fba[f])
#activities.remove("None")
# Get list of module default sectors
flowsa_sector_list = list(load_sector_crosswalk()[sector_source_name])
activities_missing_sectors = set(activities) - set(flowsa_sector_list)
if len(activities_missing_sectors) > 0:
log.info(str(len(
activities_missing_sectors)) + " activities not matching sectors in default " + sector_source_name + " list.")
return activities_missing_sectors
else:
log.info("All activities match sectors in " + sector_source_name + " list.")
return None
def check_if_data_exists_at_geoscale(df, geoscale, activitynames='All'):
"""
Check if an activity or a sector exists at the specified geoscale
:param df: flowbyactivity dataframe
:param activitynames: Either an activity name (ex. 'Domestic') or a sector (ex. '1124')
:param geoscale: national, state, or county
:return:
"""
# if any activity name is specified, check if activity data exists at the specified geoscale
activity_list = []
if activitynames != 'All':
        if isinstance(activitynames, str):
activity_list.append(activitynames)
else:
activity_list = activitynames
# check for specified activity name
df = df[(df[fba_activity_fields[0]].isin(activity_list)) |
(df[fba_activity_fields[1]].isin(activity_list))].reset_index(drop=True)
else:
activity_list.append('activities')
# filter by geoscale depends on Location System
fips = create_geoscale_list(df, geoscale)
df = df[df['Location'].isin(fips)]
if len(df) == 0:
log.info(
"No flows found for " + ', '.join(activity_list) + " at the " + geoscale + " scale")
exists = "No"
else:
log.info("Flows found for " + ', '.join(activity_list) + " at the " + geoscale + " scale")
exists = "Yes"
return exists
def check_if_data_exists_at_less_aggregated_geoscale(df, geoscale, activityname):
"""
In the event data does not exist at specified geoscale, check if data exists at less aggregated level
:param df: Either flowbyactivity or flowbysector dataframe
    :param geoscale: national, state, or county
    :param activityname: Either an activity name (ex. 'Domestic') or a sector (ex. '1124')
:return:
"""
if geoscale == 'national':
df = df[(df[fba_activity_fields[0]] == activityname) | (
df[fba_activity_fields[1]] == activityname)]
fips = create_geoscale_list(df, 'state')
df = df[df['Location'].isin(fips)]
if len(df) == 0:
log.info("No flows found for " + activityname + " at the state scale")
fips = create_geoscale_list(df, 'county')
df = df[df['Location'].isin(fips)]
if len(df) == 0:
log.info("No flows found for " + activityname + " at the county scale")
else:
log.info("Flowbyactivity data exists for " + activityname + " at the county level")
new_geoscale_to_use = 'county'
return new_geoscale_to_use
else:
log.info("Flowbyactivity data exists for " + activityname + " at the state level")
new_geoscale_to_use = 'state'
return new_geoscale_to_use
if geoscale == 'state':
df = df[(df[fba_activity_fields[0]] == activityname) | (
df[fba_activity_fields[1]] == activityname)]
fips = create_geoscale_list(df, 'county')
df = df[df['Location'].isin(fips)]
if len(df) == 0:
log.info("No flows found for " + activityname + " at the county scale")
else:
log.info("Flowbyactivity data exists for " + activityname + " at the county level")
new_geoscale_to_use = 'county'
return new_geoscale_to_use
def check_if_location_systems_match(df1, df2):
"""
Check if two dataframes share the same location system
:param df1: fba or fbs df
:param df2: fba or fbs df
:return:
"""
if df1["LocationSystem"].all() == df2["LocationSystem"].all():
log.info("LocationSystems match")
else:
log.warning("LocationSystems do not match, might lose county level data")
def check_if_data_exists_for_same_geoscales(fba_wsec_walloc, source,
activity): # fba_w_aggregated_sectors
"""
Determine if data exists at the same scales for datasource and allocation source
    :param fba_wsec_walloc: fba df with sectors merged with the allocation table
    :param source: name of the FBA data source
    :param activity: list of activity names in the activity set
:return:
"""
# todo: modify so only returns warning if no value for entire location, not just no value for one of the possible sectors
from flowsa.mapping import get_activitytosector_mapping
# create list of highest sector level for which there should be data
mapping = get_activitytosector_mapping(source)
# filter by activity of interest
mapping = mapping.loc[mapping['Activity'].isin(activity)]
# add sectors to list
sectors_list = pd.unique(mapping['Sector']).tolist()
# subset fba w sectors and with merged allocation table so only have rows with aggregated sector list
df_subset = fba_wsec_walloc.loc[
(fba_wsec_walloc[fbs_activity_fields[0]].isin(sectors_list)) |
(fba_wsec_walloc[fbs_activity_fields[1]].isin(sectors_list))].reset_index(drop=True)
# only interested in total flows
# df_subset = df_subset.loc[df_subset['FlowName'] == 'total'].reset_index(drop=True)
# df_subset = df_subset.loc[df_subset['Compartment'] == 'total'].reset_index(drop=True)
# create subset of fba where the allocation data is missing
missing_alloc = df_subset.loc[df_subset['FlowAmountRatio'].isna()].reset_index(drop=True)
# drop any rows where source flow value = 0
missing_alloc = missing_alloc.loc[missing_alloc['FlowAmount'] != 0].reset_index(drop=True)
    # create list of locations with missing allocation data
states_missing_data = pd.unique(missing_alloc['Location']).tolist()
if len(missing_alloc) == 0:
log.info("All aggregated sector flows have allocation flow ratio data")
else:
log.warning("Missing allocation flow ratio data for " + ', '.join(states_missing_data))
return None
def check_if_losing_sector_data(df, target_sector_level):
"""
    Determine rows of data that will be lost if subsetting data at the target sector level.
    In some instances, not all sectors are disaggregated to the target sector level.
    :param df: df with sector columns
    :param target_sector_level: target sector level key (ex. 'NAICS_6')
:return:
"""
# exclude nonsectors
df = replace_NoneType_with_empty_cells(df)
rows_lost = pd.DataFrame()
for i in range(2, sector_level_key[target_sector_level]):
# create df of i length
df_x1 = df.loc[(df[fbs_activity_fields[0]].apply(lambda x: len(x) == i)) &
(df[fbs_activity_fields[1]] == '')]
df_x2 = df.loc[(df[fbs_activity_fields[0]] == '') &
(df[fbs_activity_fields[1]].apply(lambda x: len(x) == i))]
df_x3 = df.loc[(df[fbs_activity_fields[0]].apply(lambda x: len(x) == i)) &
(df[fbs_activity_fields[1]].apply(lambda x: len(x) == i))]
df_x = pd.concat([df_x1, df_x2, df_x3], ignore_index=True, sort=False)
# create df of i + 1 length
df_y1 = df.loc[df[fbs_activity_fields[0]].apply(lambda x: len(x) == i + 1) |
df[fbs_activity_fields[1]].apply(lambda x: len(x) == i + 1)]
df_y2 = df.loc[df[fbs_activity_fields[0]].apply(lambda x: len(x) == i + 1) &
df[fbs_activity_fields[1]].apply(lambda x: len(x) == i + 1)]
df_y = pd.concat([df_y1, df_y2], ignore_index=True, sort=False)
# create temp sector columns in df y, that are i digits in length
df_y.loc[:, 'spb_tmp'] = df_y[fbs_activity_fields[0]].apply(lambda x: x[0:i])
df_y.loc[:, 'scb_tmp'] = df_y[fbs_activity_fields[1]].apply(lambda x: x[0:i])
# don't modify household sector lengths
df_y = df_y.replace({'F0': 'F010',
'F01': 'F010'})
# merge the two dfs
df_m = pd.merge(df_x,
df_y[['Class', 'Context', 'FlowType', 'Flowable', 'Location', 'LocationSystem', 'Unit',
'Year', 'spb_tmp', 'scb_tmp']],
how='left',
left_on=['Class', 'Context', 'FlowType', 'Flowable', 'Location', 'LocationSystem', 'Unit',
'Year', 'SectorProducedBy', 'SectorConsumedBy'],
right_on=['Class', 'Context', 'FlowType', 'Flowable', 'Location', 'LocationSystem', 'Unit',
'Year', 'spb_tmp', 'scb_tmp'])
# extract the rows that are not disaggregated to more specific naics
rl = df_m[(df_m['scb_tmp'].isnull()) & (df_m['spb_tmp'].isnull())]
# clean df
rl = clean_df(rl, flow_by_sector_fields, fbs_fill_na_dict)
rl_list = rl[['SectorProducedBy', 'SectorConsumedBy']].drop_duplicates().values.tolist()
# match sectors with target sector length sectors
# import cw and subset to current sector length and target sector length
cw_load = load_sector_length_crosswalk()
nlength = list(sector_level_key.keys())[list(sector_level_key.values()).index(i)]
cw = cw_load[[nlength, target_sector_level]].drop_duplicates()
# add column with counts
cw['sector_count'] = cw.groupby(nlength)[nlength].transform('count')
# merge df & conditionally replace sector produced/consumed columns
rl_m = pd.merge(rl, cw, how='left', left_on=[fbs_activity_fields[0]], right_on=[nlength])
rl_m.loc[rl_m[fbs_activity_fields[0]] != '', fbs_activity_fields[0]] = rl_m[target_sector_level]
rl_m = rl_m.drop(columns=[nlength, target_sector_level])
rl_m2 = pd.merge(rl_m, cw, how='left', left_on=[fbs_activity_fields[1]], right_on=[nlength])
rl_m2.loc[rl_m2[fbs_activity_fields[1]] != '', fbs_activity_fields[1]] = rl_m2[target_sector_level]
rl_m2 = rl_m2.drop(columns=[nlength, target_sector_level])
# create one sector count column
rl_m2['sector_count_x'] = rl_m2['sector_count_x'].fillna(rl_m2['sector_count_y'])
rl_m3 = rl_m2.rename(columns={'sector_count_x': 'sector_count'})
rl_m3 = rl_m3.drop(columns=['sector_count_y'])
# calculate new flow amounts, based on sector count, allocating equally to the new sector length codes
rl_m3['FlowAmount'] = rl_m3['FlowAmount'] / rl_m3['sector_count']
rl_m3 = rl_m3.drop(columns=['sector_count'])
# append to df
if len(rl) != 0:
log.warning('Data found at ' + str(i) + ' digit NAICS not represented in current '
'data subset: {}'.format(' '.join(map(str, rl_list))))
rows_lost = rows_lost.append(rl_m3, ignore_index=True, sort=True)
if len(rows_lost) == 0:
log.info('Data exists at ' + target_sector_level)
else:
log.info('Allocating FlowAmounts equally to each ' + target_sector_level +
' associated with the sectors previously dropped')
# add rows of missing data to the fbs sector subset
df_w_lost_data = pd.concat([df, rows_lost], ignore_index=True, sort=True)
df_w_lost_data = replace_strings_with_NoneType(df_w_lost_data)
return df_w_lost_data
def check_allocation_ratios(flow_alloc_df, activity_set, source_name, method_name):
"""
Check for issues with the flow allocation ratios
    :param flow_alloc_df: df of allocation ratios by sector and location
    :param activity_set: name of the activity set
    :param source_name: name of the FBA data source
    :param method_name: name of the FBS method
:return:
"""
# create column of sector lengths
flow_alloc_df.loc[:, 'slength'] = flow_alloc_df['Sector'].apply(lambda x: len(x))
# subset df
flow_alloc_df2 = flow_alloc_df[['FBA_Activity', 'Location', 'slength', 'FlowAmountRatio']]
# sum the flow amount ratios by location and sector length
flow_alloc_df3 = flow_alloc_df2.groupby(['FBA_Activity', 'Location', 'slength'],
as_index=False)[["FlowAmountRatio"]].agg("sum")
# not interested in sector length > 6
flow_alloc_df4 = flow_alloc_df3[flow_alloc_df3['slength'] <= 6]
ua_count1 = len(flow_alloc_df4[flow_alloc_df4['FlowAmountRatio'] < 1])
log.info('There are ' + str(ua_count1) +
' instances at a sector length of 6 or less where the allocation ratio for a location and sector length is < 1')
ua_count2 = len(flow_alloc_df4[flow_alloc_df4['FlowAmountRatio'] < 0.99])
log.info('There are ' + str(ua_count2) +
' instances at a sector length of 6 or less where the allocation ratio for a location and sector length is < 0.99')
ua_count3 = len(flow_alloc_df4[flow_alloc_df4['FlowAmountRatio'] > 1])
log.info('There are ' + str(ua_count3) +
' instances at a sector length of 6 or less where the allocation ratio for a location and sector length is > 1')
ua_count4 = len(flow_alloc_df4[flow_alloc_df4['FlowAmountRatio'] > 1.01])
log.info('There are ' + str(ua_count4) +
' instances at a sector length of 6 or less where the allocation ratio for a location and sector length is > 1.01')
# save csv to output folder
log.info('Save the summary table of flow allocation ratios for each sector length for ' +
activity_set + ' in output folder')
# output data for all sector lengths
flow_alloc_df3.to_csv(outputpath + "FlowBySectorMethodAnalysis/" + method_name + '_' + source_name +
"_allocation_ratios_" + activity_set + ".csv", index=False)
return None
def check_for_differences_between_fba_load_and_fbs_output(fba_load, fbs_load, activity_set, source_name, method_name):
"""
Function to compare the loaded flowbyactivity with the final flowbysector output, checking for data loss
    :param fba_load: df of the loaded FlowByActivity
    :param fbs_load: df of the final FlowBySector
    :param activity_set: name of the activity set
    :param source_name: name of the FBA data source
    :param method_name: name of the FBS method
:return:
"""
from flowsa.flowbyfunctions import replace_strings_with_NoneType, replace_NoneType_with_empty_cells
# subset fba df
fba = fba_load[['Class', 'MetaSources', 'Flowable', 'Unit', 'FlowType', 'ActivityProducedBy',
'ActivityConsumedBy', 'Context', 'Location', 'LocationSystem', 'Year',
'FlowAmount']].drop_duplicates().reset_index(drop=True)
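    # collapse all locations to the national FIPS code so FBA and FBS totals are compared at the national level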
fba.loc[:, 'Location'] = US_FIPS
group_cols = ['ActivityProducedBy', 'ActivityConsumedBy', 'Flowable', 'Unit', 'FlowType', 'Context',
'Location', 'LocationSystem', 'Year']
fba_agg = aggregator(fba, group_cols)
fba_agg.rename(columns={'FlowAmount': 'FBA_amount'}, inplace=True)
# subset fbs df
fbs = fbs_load[['Class', 'SectorSourceName', 'Flowable', 'Unit', 'FlowType', 'SectorProducedBy', 'SectorConsumedBy',
'ActivityProducedBy', 'ActivityConsumedBy', 'Context', 'Location', 'LocationSystem', 'Year',
'FlowAmount']].drop_duplicates().reset_index(drop=True)
fbs = replace_NoneType_with_empty_cells(fbs)
fbs['ProducedLength'] = fbs['SectorProducedBy'].apply(lambda x: len(x))
fbs['ConsumedLength'] = fbs['SectorConsumedBy'].apply(lambda x: len(x))
fbs['SectorLength'] = fbs[['ProducedLength', 'ConsumedLength']].max(axis=1)
fbs.loc[:, 'Location'] = US_FIPS
group_cols = ['ActivityProducedBy', 'ActivityConsumedBy', 'Flowable', 'Unit', 'FlowType', 'Context',
'Location', 'LocationSystem', 'Year', 'SectorLength']
fbs_agg = aggregator(fbs, group_cols)
fbs_agg.rename(columns={'FlowAmount': 'FBS_amount'}, inplace=True)
# merge compare 1 and compare 2
df_merge = fba_agg.merge(fbs_agg,
left_on=['ActivityProducedBy', 'ActivityConsumedBy', 'Flowable', 'Unit',
'FlowType', 'Context', 'Location','LocationSystem', 'Year'],
right_on=['ActivityProducedBy', 'ActivityConsumedBy', 'Flowable', 'Unit',
'FlowType', 'Context', 'Location', 'LocationSystem', 'Year'],
how='left')
df_merge['Ratio'] = df_merge['FBS_amount'] / df_merge['FBA_amount']
# reorder
df_merge = df_merge[['ActivityProducedBy', 'ActivityConsumedBy', 'Flowable', 'Unit', 'FlowType', 'Context',
'Location', 'LocationSystem', 'Year', 'SectorLength', 'FBA_amount', 'FBS_amount', 'Ratio']]
# only report difference at sector length <= 6
comparison = df_merge[df_merge['SectorLength'] <= 6]
# todo: address the duplicated rows/data that occur for non-naics household sector length
ua_count1 = len(comparison[comparison['Ratio'] < 0.95])
log.info('There are ' + str(ua_count1) +
' combinations of flowable/context/sector length where the flowbyactivity to flowbysector ratio is < 0.95')
ua_count2 = len(comparison[comparison['Ratio'] < 0.99])
log.info('There are ' + str(ua_count2) +
' combinations of flowable/context/sector length where the flowbyactivity to flowbysector ratio is < 0.99')
oa_count1 = len(comparison[comparison['Ratio'] > 1])
log.info('There are ' + str(oa_count1) +
' combinations of flowable/context/sector length where the flowbyactivity to flowbysector ratio is > 1.0')
oa_count2 = len(comparison[comparison['Ratio'] > 1.01])
log.info('There are ' + str(oa_count2) +
' combinations of flowable/context/sector length where the flowbyactivity to flowbysector ratio is > 1.01')
# save csv to output folder
log.info('Save the comparison of FlowByActivity load to FlowBySector ratios for ' +
activity_set + ' in output folder')
# output data at all sector lengths
df_merge.to_csv(outputpath + "FlowBySectorMethodAnalysis/" + method_name + '_' + source_name +
"_FBA_load_to_FBS_comparison_" + activity_set + ".csv", index=False)
return None
def compare_fba_load_and_fbs_output_totals(fba_load, fbs_load, activity_set, source_name, method_name, attr, method, mapping_files):
"""
Function to compare the loaded flowbyactivity total with the final flowbysector output total
    :param fba_load: df of the loaded FlowByActivity
    :param fbs_load: df of the final FlowBySector
    :param activity_set: name of the activity set
    :param source_name: name of the FBA data source
    :param method_name: name of the FBS method
    :param attr: attributes of the activity set from the FBS method yaml
    :param method: the FBS method yaml
    :param mapping_files: flow mapping files used to map elementary flows
:return:
"""
from flowsa.flowbyfunctions import subset_df_by_geoscale, sector_aggregation
from flowsa.common import load_source_catalog
from flowsa.mapping import map_elementary_flows
log.info('Comparing loaded FlowByActivity FlowAmount total to subset FlowBySector FlowAmount total')
# load source catalog
cat = load_source_catalog()
src_info = cat[source_name]
# extract relevant geoscale data or aggregate existing data
fba = subset_df_by_geoscale(fba_load, attr['allocation_from_scale'], method['target_geoscale'])
# map loaded fba
fba = map_elementary_flows(fba, mapping_files, keep_unmapped_rows=True)
if src_info['sector-like_activities']:
# if activities are sector-like, run sector aggregation and then subset df to only keep NAICS2
fba = fba[['Class', 'FlowAmount', 'Unit', 'Context', 'ActivityProducedBy', 'ActivityConsumedBy', 'Location', 'LocationSystem']]
# rename the activity cols to sector cols for purposes of aggregation
fba = fba.rename(columns={'ActivityProducedBy': 'SectorProducedBy',
'ActivityConsumedBy': 'SectorConsumedBy'})
group_cols_agg = ['Class', 'Context', 'Unit', 'Location', 'LocationSystem', 'SectorProducedBy', 'SectorConsumedBy']
fba = sector_aggregation(fba, group_cols_agg)
# subset fba to only include NAICS2
fba = replace_NoneType_with_empty_cells(fba)
fba = fba[fba['SectorConsumedBy'].apply(lambda x: len(x) == 2) |
fba['SectorProducedBy'].apply(lambda x: len(x) == 2)]
# subset/agg dfs
col_subset = ['Class', 'FlowAmount', 'Unit', 'Context', 'Location', 'LocationSystem']
group_cols = ['Class', 'Unit', 'Context', 'Location', 'LocationSystem']
# fba
fba = fba[col_subset]
fba_agg = aggregator(fba, group_cols).reset_index(drop=True)
fba_agg.rename(columns={'FlowAmount': 'FBA_amount',
'Unit': 'FBA_unit'}, inplace=True)
# fbs
fbs = fbs_load[col_subset]
fbs_agg = aggregator(fbs, group_cols)
fbs_agg.rename(columns={'FlowAmount': 'FBS_amount',
'Unit': 'FBS_unit'}, inplace=True)
try:
# merge FBA and FBS totals
df_merge = fba_agg.merge(fbs_agg, how='left')
df_merge['FlowAmount_difference'] = df_merge['FBA_amount'] - df_merge['FBS_amount']
df_merge['Percent_difference'] = (df_merge['FlowAmount_difference']/df_merge['FBA_amount']) * 100
# reorder
df_merge = df_merge[['Class', 'Context', 'Location', 'LocationSystem', 'FBA_amount', 'FBA_unit',
'FBS_amount', 'FBS_unit', 'FlowAmount_difference', 'Percent_difference']]
df_merge = replace_NoneType_with_empty_cells(df_merge)
# list of contexts
context_list = df_merge['Context'].to_list()
# loop through the contexts and print results of comparison
for i in context_list:
df_merge_subset = df_merge[df_merge['Context'] == i].reset_index(drop=True)
diff_per = df_merge_subset['Percent_difference'][0]
# make reporting more manageable
if abs(diff_per) > 0.001:
diff_per = round(diff_per, 2)
else:
diff_per = round(diff_per, 6)
diff_units = df_merge_subset['FBS_unit'][0]
if diff_per > 0:
log.info('The total FlowBySector FlowAmount for ' + source_name + ' ' + activity_set +
' ' + i + ' is ' + str(abs(diff_per)) + '% less than the total FlowByActivity FlowAmount')
else:
log.info('The total FlowBySector FlowAmount for ' + source_name + ' ' + activity_set +
' ' + i + ' is ' + str(abs(diff_per)) + '% more than the total FlowByActivity FlowAmount')
# save csv to output folder
log.info('Save the comparison of FlowByActivity load to FlowBySector total FlowAmounts for ' +
activity_set + ' in output folder')
# output data at all sector lengths
df_merge.to_csv(outputpath + "FlowBySectorMethodAnalysis/" + method_name + '_' + source_name +
"_FBA_total_to_FBS_total_FlowAmount_comparison_" + activity_set + ".csv", index=False)
    except Exception:
        log.info('Error occurred when comparing total FlowAmounts for FlowByActivity and FlowBySector')
return None
def check_summation_at_sector_lengths(df):
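    """
    Sum FlowAmounts by location and sector length and report what share of the
    largest sector-length total is captured at each length.
    :param df: df with 'Sector', 'Location', and 'FlowAmount' columns
    :return: df of summed 'Denominator' and 'percentOfTot' by sector length
    """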
# columns to keep
df_cols = [e for e in df.columns if e not in ('MeasureofSpread', 'Spread', 'DistributionType', 'Min', 'Max',
'DataReliability', 'DataCollection', 'FlowType', 'Compartment',
'Description', 'Activity')]
# subset df
df2 = df[df_cols]
    # drop rows missing a sector and add a column of sector string lengths
df2 = df2[~df2['Sector'].isnull()]
df2 = df2.assign(slength=df2['Sector'].apply(lambda x: len(x)))
# sum flowamounts by sector length
denom_df = df2.copy()
denom_df.loc[:, 'Denominator'] = denom_df.groupby(['Location', 'slength'])['FlowAmount'].transform('sum')
summed_df = denom_df.drop(columns=['Sector', 'FlowAmount']).drop_duplicates().reset_index(drop=True)
# max value
    maxv = summed_df['Denominator'].max()
# percent of total accounted for
summed_df = summed_df.assign(percentOfTot=summed_df['Denominator']/maxv)
summed_df = summed_df.sort_values(['slength']).reset_index(drop=True)
return summed_df
def check_for_nonetypes_in_sector_col(df):
"""
Check for NoneType in columns where datatype = string
:param df: df with columns where datatype = object
:return: warning message if there are NoneTypes
"""
# if datatypes are strings, return warning message
if df['Sector'].isnull().any():
log.warning("There are NoneType values in the 'Sector' column")
return df
def check_for_negative_flowamounts(df):
if (df['FlowAmount'].values < 0).any():
log.warning('There are negative FlowAmounts')
return df
def check_if_sectors_are_naics(df, crosswalk_list, column_headers):
"""
Check if activity-like sectors are in fact sectors. Also works for the Sector column
:return:
"""
# create a df of non-sectors to export
non_sectors_df = []
# create a df of just the non-sectors column
non_sectors_list = []
# loop through the df headers and determine if value is not in crosswalk list
for c in column_headers:
# create df where sectors do not exist in master crosswalk
non_sectors = df[~df[c].isin(crosswalk_list)]
# drop rows where c is empty
non_sectors = non_sectors[non_sectors[c] != '']
# subset to just the sector column
if len(non_sectors) != 0:
sectors = non_sectors[[c]].rename(columns={c: 'NonSectors'})
non_sectors_df.append(non_sectors)
non_sectors_list.append(sectors)
if len(non_sectors_df) != 0:
# concat the df and the df of sectors
# ns_df = pd.concat(non_sectors_df, sort=False, ignore_index=True)
        ns_list = pd.concat(non_sectors_list, sort=False, ignore_index=True)
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2021/11/2 21:08
Desc: Tonghuashun (同花顺) - Data Center - Technical Stock Selection
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
    Get the content of a JS file
    :param file: JS file name
    :type file: str
    :return: file content
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
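# The bundled ths.js is assumed to expose a v() function that generates the
# anti-crawler cookie token required by data.10jqka.com.cn; each request below
# refreshes this token before hitting the site.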
def stock_rank_cxg_ths(symbol: str = "创月新高") -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - New Highs
http://data.10jqka.com.cn/rank/cxg/
:param symbol: choice of {"创月新高", "半年新高", "一年新高", "历史新高"}
:type symbol: str
    :return: new-high data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新高": "4",
"半年新高": "3",
"一年新高": "2",
"历史新高": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期高点", "前期高点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期高点日期"] = pd.to_datetime(big_df["前期高点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期高点"] = pd.to_numeric(big_df["前期高点"])
return big_df
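# Minimal usage sketch (assumes network access to data.10jqka.com.cn):
#   new_high_df = stock_rank_cxg_ths(symbol="创月新高")
#   print(new_high_df.head())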
def stock_rank_cxd_ths(symbol: str = "创月新低") -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - New Lows
http://data.10jqka.com.cn/rank/cxd/
:param symbol: choice of {"创月新低", "半年新低", "一年新低", "历史新低"}
:type symbol: str
    :return: new-low data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新低": "4",
"半年新低": "3",
"一年新低": "2",
"历史新低": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期低点", "前期低点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期低点日期"] = pd.to_datetime(big_df["前期低点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期低点"] = pd.to_numeric(big_df["前期低点"])
return big_df
def stock_rank_lxsz_ths() -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Consecutive Rises
http://data.10jqka.com.cn/rank/lxsz/
    :return: consecutive-rise data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_numeric(big_df["累计换手率"])
big_df["收盘价"] = pd.to_numeric(big_df["收盘价"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
big_df["连涨天数"] = pd.to_numeric(big_df["连涨天数"])
return big_df
def stock_rank_lxxd_ths() -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Consecutive Declines
http://data.10jqka.com.cn/rank/lxxd/
    :return: consecutive-decline data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxxd/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxxd/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_numeric(big_df["累计换手率"])
big_df["收盘价"] = pd.to_numeric(big_df["收盘价"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
big_df["连涨天数"] = pd.to_numeric(big_df["连涨天数"])
return big_df
def stock_rank_cxfl_ths() -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Sustained Volume Expansion
http://data.10jqka.com.cn/rank/cxfl/
    :return: sustained volume expansion data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxfl/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxfl/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"涨跌幅",
"最新价",
"成交量",
"基准日成交量",
"放量天数",
"阶段涨跌幅",
"所属行业",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["阶段涨跌幅"] = big_df["阶段涨跌幅"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["阶段涨跌幅"] = pd.to_numeric(big_df["阶段涨跌幅"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["放量天数"] = pd.to_numeric(big_df["放量天数"])
return big_df
def stock_rank_cxsl_ths() -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Sustained Volume Contraction
http://data.10jqka.com.cn/rank/cxsl/
    :return: sustained volume contraction data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxsl/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxsl/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"涨跌幅",
"最新价",
"成交量",
"基准日成交量",
"缩量天数",
"阶段涨跌幅",
"所属行业",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["阶段涨跌幅"] = big_df["阶段涨跌幅"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["阶段涨跌幅"] = pd.to_numeric(big_df["阶段涨跌幅"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["缩量天数"] = pd.to_numeric(big_df["缩量天数"])
return big_df
def stock_rank_xstp_ths(symbol: str = "500日均线") -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Upward Breakout
http://data.10jqka.com.cn/rank/xstp/
:param symbol: choice of {"5日均线", "10日均线", "20日均线", "30日均线", "60日均线", "90日均线", "250日均线", "500日均线"}
:type symbol: str
    :return: upward-breakout data
:rtype: pandas.DataFrame
"""
symbol_map = {
"5日均线": 5,
"10日均线": 10,
"20日均线": 20,
"30日均线": 30,
"60日均线": 60,
"90日均线": 90,
"250日均线": 250,
"500日均线": 500,
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xstp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xstp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"最新价",
"成交额",
"成交量",
"涨跌幅",
"换手率",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["换手率"] = big_df["换手率"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
return big_df
def stock_rank_xxtp_ths(symbol: str = "500日均线") -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Downward Breakout
http://data.10jqka.com.cn/rank/xxtp/
:param symbol: choice of {"5日均线", "10日均线", "20日均线", "30日均线", "60日均线", "90日均线", "250日均线", "500日均线"}
:type symbol: str
    :return: downward-breakout data
:rtype: pandas.DataFrame
"""
symbol_map = {
"5日均线": 5,
"10日均线": 10,
"20日均线": 20,
"30日均线": 30,
"60日均线": 60,
"90日均线": 90,
"250日均线": 250,
"500日均线": 500,
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xxtp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xxtp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"最新价",
"成交额",
"成交量",
"涨跌幅",
"换手率",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["换手率"] = big_df["换手率"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
return big_df
def stock_rank_ljqs_ths() -> pd.DataFrame:
"""
    Tonghuashun - Data Center - Technical Stock Selection - Rising Volume and Price
http://data.10jqka.com.cn/rank/ljqs/
    :return: rising volume and price data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/ljqs/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/ljqs/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
        temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:30:14 2020
@author: mehrdad
"""
# NOTE: This class/file is meant to be located in a data-access-layer (DAL) package/namespace
from commonlayer.common_helper_class import CommonHelper
from dal.base.table_interface import TableInterface
import pandas as pd
class TripPlans(TableInterface):
def load_trip_plan(self, trip, mode, numItineraries, maxWalkDistance):
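        # Look up a previously stored OTP plan that matches this trip's start time,
        # origin/destination geometry, and the given planning parameters.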
qstr = """SELECT start_time,
ST_AsText(origin) as origin, ST_AsText(destination) as destination,
mode, max_walk_distance, no_of_itins,
plan
FROM trip_plans
WHERE start_time = '{0}' AND
origin = '{1}' AND destination = '{2}' AND
mode = '{3}' AND max_walk_distance = {4} AND no_of_itins = {5};
""".format(
CommonHelper.DateTime_to_Text(trip.starttime),
CommonHelper.pointRow_to_postgisPoint(trip.origin), CommonHelper.pointRow_to_postgisPoint(trip.destination),
mode, maxWalkDistance, numItineraries
)
res, plan_rows = self.db_command.ExecuteSQL(qstr, LOG_IMPORTANT_CUSTOM = False)
return res, plan_rows
def store_trip_plan(self, trip, mode, numItineraries, maxWalkDistance, plan):
qstr = """INSERT INTO trip_plans (start_time, origin, destination, mode, max_walk_distance, no_of_itins, plan)
VALUES ('{0}',
ST_GeomFromText('{1}'), ST_GeomFromText('{2}'),
'{3}',{4},{5},
'{6}' ); """.format(
CommonHelper.DateTime_to_Text(trip.starttime),
CommonHelper.pointRow_to_postgisPoint(trip.origin), CommonHelper.pointRow_to_postgisPoint(trip.destination),
mode, maxWalkDistance, numItineraries,
CommonHelper.json_to_sqlstr(plan)
)
res, db_res = self.db_command.ExecuteSQL(qstr, LOG_IMPORTANT_CUSTOM=False)
return res
def delete_trip_plan(self, trip, mode, numItineraries, maxWalkDistance):
qstr = """DELETE FROM trip_plans
WHERE start_time = '{0}' AND
origin = '{1}' AND destination = '{2}' AND
mode = '{3}' AND max_walk_distance = {4} AND no_of_itins = {5};
""".format(
CommonHelper.DateTime_to_Text(trip.starttime),
CommonHelper.pointRow_to_postgisPoint(trip.origin), CommonHelper.pointRow_to_postgisPoint(trip.destination),
mode, maxWalkDistance, numItineraries
)
res, db_res = self.db_command.ExecuteSQL(qstr, LOG_IMPORTANT_CUSTOM=False)
return res, self._get_delete_macthed_count(res, db_res)
def load_failed_OTP_plans(self):
qstr = """
SELECT user_id as "user", trip_id as "trip", t.start_time,
--ST_AsText(t.origin) origin, ST_AsText(t.destination) destination,
CONCAT('(', st_y(st_astext(tt.origin))::text , ',' , st_x(st_astext(tt.origin))::text, ')') plan_origin,
CONCAT('(', st_y(st_astext(tt.destination ))::text , ',' , st_x(st_astext(tt.destination))::text, ')') plan_destination,
mode as mainmode, max_walk_distance, no_of_itins, plan
FROM
(
-- Observed trips with zero computed alternative (no walk, bike, PT) --
SELECT user_id, id as trip_id,
start_time, origin, destination
From trips_alts
WHERE plan_id = 0
--------- Mandatory Filters ------------
AND duration > '00:00:00' AND duration < '1 day' --TODO: why there's such bugs?
--------- Geographical Filters ---------:
-- Helsinki region, only within the area (use AND):
AND point(geometry(origin)) <@ box'(24.572978,60.100104)(25.216365, 60.336453)' -- Helsinki region rectangular boundaries
AND point(geometry(destination)) <@ box'(24.572978,60.100104)(25.216365, 60.336453)' -- Helsinki region rectangular boundaries
-- Greater Jatkasaari rectangular boundaries, Trips from/to:
--AND (point(geometry(origin)) <@ box'(24.895571, 60.145601)(24.931923, 60.168201)'
--OR point(geometry(destination)) <@ box'(24.895571, 60.145601)(24.931923, 60.168201)')
AND (user_id, id) not in
(
select user_id, id
from trips_alts
where plan_id > 0
)
) t
INNER JOIN (SELECT * FROM trip_plans) tt
ON (tt.start_time = t.start_time AND tt.origin = t.origin AND tt.destination = t.destination)
ORDER BY user_id, trip_id, mode
"""
res, db_res = self.db_command.ExecuteSQL(qstr)
if res and db_res.rowcount>0:
records_df = pd.DataFrame(db_res.fetchall())
records_df.columns = db_res.keys()
print()
print("load_failed_OTP_computations(): Loaded from DB,",db_res.rowcount,"records")
print()
else:
            records_df = pd.DataFrame()
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from utils import odds, clean_sheet, time_decay, score_mtx, get_next_gw
from ranked_probability_score import ranked_probability_score, match_outcome
import pymc3 as pm
import theano.tensor as tt
class Bayesian:
""" Model scored goals at home and away as Bayesian Random variables """
def __init__(self, games, performance='score', decay=True):
"""
Args:
            games (pd.DataFrame): Finished games used for training.
performance (string): Observed performance metric to use in model
decay (boolean): Apply time decay
"""
teams = np.sort(np.unique(games["team1"]))
league_size = len(teams)
self.teams = (
games.loc[:, ["team1"]]
.drop_duplicates()
.sort_values("team1")
.reset_index(drop=True)
.assign(team_index=np.arange(league_size))
.rename(columns={"team1": "team"})
)
self.league_size = self.teams.shape[0]
df = (
pd.merge(games, self.teams, left_on="team1", right_on="team")
.rename(columns={"team_index": "hg"})
.drop(["team"], axis=1)
.drop_duplicates()
.merge(self.teams, left_on="team2", right_on="team")
.rename(columns={"team_index": "ag"})
.drop(["team"], axis=1)
.sort_values("date")
)
df["date"] = pd.to_datetime(df["date"])
df["days_since"] = (df["date"].max() - df["date"]).dt.days
df["weight"] = time_decay(0.00003, df["days_since"]) if decay else 1
self.decay = decay
# Handle different data to infer
assert performance == 'score' or performance == 'xg'
self.performance = performance
self.games = df.loc[:, [
f"{performance}1", f"{performance}2", "team1", "team2",
"hg", "ag", "weight"]]
self.games = self.games.dropna()
if performance == 'xg':
self.games = (
self.games
.rename(columns={"xg1": "score1", "xg2": "score2"})
)
self.model = self._build_model()
def _build_model(self):
""" Build the model
Returns:
pymc3.Model: untrained model
"""
home_idx, teams = pd.factorize(self.games["team1"], sort=True)
away_idx, _ = pd.factorize(self.games["team2"], sort=True)
with pm.Model() as model:
# constant data
home_team = pm.Data("home_team", home_idx)
away_team = pm.Data("away_team", away_idx)
score1_obs = pm.Data("score1_obs", self.games["score1"])
score2_obs = pm.Data("score2_obs", self.games["score2"])
# global model parameters
home = pm.Normal("home", mu=0, sigma=1)
intercept = pm.Normal("intercept", mu=0, sigma=1)
sd_att = pm.HalfNormal("sd_att", sigma=2)
sd_def = pm.HalfNormal("sd_def", sigma=2)
# team-specific model parameters
atts_star = pm.Normal(
"atts_star",
mu=0,
sigma=sd_att,
shape=self.league_size)
defs_star = pm.Normal(
"defs_star",
mu=0,
sigma=sd_def,
shape=self.league_size)
# apply sum zero constraints
atts = pm.Deterministic(
"atts",
atts_star - tt.mean(atts_star))
defs = pm.Deterministic(
"defs",
defs_star - tt.mean(defs_star))
            # calculate theta
home_theta = tt.exp(
intercept + atts[home_team] + defs[away_team] + home)
away_theta = tt.exp(
intercept + atts[away_team] + defs[home_team])
# likelihood of observed data
pm.Potential(
'home_goals',
self.games["weight"].values * pm.Poisson.dist(mu=home_theta).logp(
score1_obs)
)
pm.Potential(
'away_goals',
self.games["weight"].values * pm.Poisson.dist(mu=away_theta).logp(
score2_obs)
)
return model
def fit(self):
"""Fit the model parameters"""
with self.model:
self.trace = pm.sample(
2000,
tune=1000,
cores=6,
return_inferencedata=False,
target_accept=0.85)
def predict(self, games):
"""Predict the outcome of games
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with game odds
"""
parameter_df = (
pd.DataFrame()
.assign(attack=[
np.mean([x[team] for x in self.trace["atts"]])
for team in range(self.league_size)])
.assign(defence=[
np.mean([x[team] for x in self.trace["defs"]])
for team in range(self.league_size)])
.assign(team=np.array(self.teams.team_index.values))
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='hg', right_on='team')
.rename(columns={"attack": "attack1", "defence": "defence1"})
.merge(parameter_df, left_on='ag', right_on='team')
.rename(columns={"attack": "attack2", "defence": "defence2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
.assign(home_adv=np.mean(self.trace["home"]))
.assign(intercept=np.mean([x for x in self.trace["intercept"]]))
)
fixtures_df["score1_infered"] = np.exp(
fixtures_df['intercept'] +
fixtures_df["home_adv"] +
fixtures_df["attack1"] +
fixtures_df["defence2"])
fixtures_df["score2_infered"] = np.exp(
fixtures_df['intercept'] +
fixtures_df["attack2"] +
fixtures_df["defence1"])
def synthesize_odds(row):
""" Lambda function that parses row by row to compute score matrix
Args:
row (array): Fixture
Returns:
(tuple): Home and Away winning and clean sheets odds
"""
m = score_mtx(row["score1_infered"], row["score2_infered"])
home_win_p, draw_p, away_win_p = odds(m)
home_cs_p, away_cs_p = clean_sheet(m)
return home_win_p, draw_p, away_win_p, home_cs_p, away_cs_p
(
fixtures_df["home_win_p"],
fixtures_df["draw_p"],
fixtures_df["away_win_p"],
fixtures_df["home_cs_p"],
fixtures_df["away_cs_p"]
) = zip(*fixtures_df.apply(
lambda row: synthesize_odds(row), axis=1))
return fixtures_df
def predict_posterior(self, games):
"""Predict the outcome of games using posterior sampling
Although I think this method is mathematically more sound,
        it gives worse results
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with game odds
"""
with self.model:
pm.set_data(
{
"home_team": games.hg.values,
"away_team": games.ag.values,
"score1_obs": np.repeat(0, games.ag.values.shape[0]),
"score2_obs": np.repeat(0, games.ag.values.shape[0]),
}
)
            post_pred = pm.sample_posterior_predictive(self.trace)
parameter_df = (
pd.DataFrame()
.assign(attack=[
                np.mean([x[team] for x in self.trace["atts"]])
for team in range(self.league_size)])
.assign(defence=[
                np.mean([x[team] for x in self.trace["defs"]])
for team in range(self.league_size)])
.assign(team=np.array(self.teams.team_index.values))
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='hg', right_on='team')
.rename(columns={"attack": "attack1", "defence": "defence1"})
.merge(parameter_df, left_on='ag', right_on='team')
.rename(columns={"attack": "attack2", "defence": "defence2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
            .assign(home_adv=np.mean([x for x in self.trace["home"]]))
            .assign(intercept=np.mean([x for x in self.trace["intercept"]]))
)
fixtures_df["score1_infered"] = post_pred["home_goals"].mean(axis=0)
fixtures_df["score2_infered"] = post_pred["away_goals"].mean(axis=0)
fixtures_df["home_win_p"] = (
(post_pred["home_goals"] > post_pred["away_goals"]).mean(axis=0)
)
fixtures_df["away_win_p"] = (
(post_pred["home_goals"] < post_pred["away_goals"]).mean(axis=0)
)
fixtures_df["draw_p"] = (
(post_pred["home_goals"] == post_pred["away_goals"]).mean(axis=0)
)
return fixtures_df
def evaluate(self, games):
""" Evaluate the model's prediction accuracy
Args:
            games (pd.DataFrame): Fixtures to evaluate on
Returns:
pd.DataFrame: df with appended metrics
"""
fixtures_df = self.predict(games)
fixtures_df["winner"] = match_outcome(fixtures_df)
fixtures_df["rps"] = fixtures_df.apply(
lambda row: ranked_probability_score([
row["home_win_p"], row["draw_p"],
row["away_win_p"]], row["winner"]), axis=1)
return fixtures_df
def backtest(self, train_games, test_season, path='', save=True):
""" Test the model's accuracy on past/finished games by iteratively
training and testing on parts of the data.
Args:
train_games (pd.DataFrame): All the training samples
            test_season (int): Season to use as the test set
path (string): Path extension to adjust to ipynb use
save (boolean): Save predictions to disk
Returns:
(float): Evaluation metric
"""
# Get training data
self.train_games = train_games
# Initialize model
self.__init__(
self.train_games[self.train_games['season'] != test_season],
performance=self.performance,
decay=self.decay)
# Initial train
self.fit()
# Get test data
# Separate testing based on per GW intervals
fixtures = (
pd.read_csv(
f"{path}data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] = (
pd.to_datetime(fixtures["kickoff_time"]).dt.date)
# Get only EPL games from the test season
self.test_games = (
self.train_games
.loc[self.train_games['league_id'] == 2411]
.loc[self.train_games['season'] == test_season]
.dropna()
)
self.test_games["kickoff_time"] = (
pd.to_datetime(self.test_games["date"]).dt.date)
# Merge on date
self.test_games = pd.merge(
self.test_games,
fixtures,
left_on='kickoff_time',
right_on='kickoff_time')
# Add the home team and away team index for running inference
self.test_games = (
pd.merge(
self.test_games,
self.teams,
left_on="team1",
right_on="team")
.rename(columns={"team_index": "hg"})
.drop(["team"], axis=1)
.drop_duplicates()
.merge(self.teams, left_on="team2", right_on="team")
.rename(columns={"team_index": "ag"})
.drop(["team"], axis=1)
.sort_values("date")
)
predictions = pd.DataFrame()
for gw in tqdm(range(1, 39)):
# For each GW of the season
if gw in self.test_games['event'].values:
# Handle case when the season is not finished
# Run inference on the specific GW and save data.
predictions = pd.concat([
predictions,
self.evaluate(
self.test_games[self.test_games['event'] == gw])
])
# Retrain model with the new GW added to the train set.
self.__init__(
pd.concat([
self.train_games[
self.train_games['season'] != test_season],
self.test_games[self.test_games['event'] <= gw]
])
.drop(columns=['ag', 'hg']),
performance=self.performance,
decay=self.decay)
self.fit()
if save:
(
predictions
.loc[:, [
'date', 'team1', 'team2', 'event', 'hg', 'ag',
'attack1', 'defence1', 'attack2', 'defence2',
'home_adv', 'intercept',
'score1_infered', 'score2_infered',
'home_win_p', 'draw_p', 'away_win_p', 'home_cs_p',
'away_cs_p']]
.to_csv(
f"{path}data/predictions/fixtures/bayesian" +
f"{'' if self.decay else '_no_decay'}" +
f"{'_xg' if self.performance == 'xg' else ''}.csv",
index=False)
)
return predictions
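# Hypothetical usage sketch (illustrative only, not part of the original module).
# It assumes `games` is shaped like the training data above (team1, team2, score1,
# score2, date, season) and that `fixtures` already carries the hg/ag team indices.
def example_bayesian_usage(games, fixtures):
    model = Bayesian(games, performance="score", decay=True)
    model.fit()
    return model.predict(fixtures)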
if __name__ == "__main__":
with open('info.json') as f:
season = json.load(f)['season']
next_gw = get_next_gw()
df = | pd.read_csv("data/fivethirtyeight/spi_matches.csv") | pandas.read_csv |
#Script to do a grid search of gas dump mass and gas dump time
#Compares against 4 different sets of ages - linear correct from astroNN; lowess correct from astroNN; Sanders & Das; APOKASC
import numpy as np
import matplotlib.pyplot as plt
import math
import h5py
import json
from astropy.io import fits
from astropy.table import Table, join
import pandas as pd
import subprocess
import os
import sys
sys.path.append('./scripts/')
from chemevo import *
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits' #The astroNN VAC for APOGEE DR16
hdf5_file = '/data/ktfm2/apogee_data/gaia_spectro.hdf5' #The hdf5 file for Sanders and Das
data_file_2 = '/data/jls/apokasc_astroNN.fits' #The APOKASC data file joined with AstroNN
hdf = h5py.File(hdf5_file, "r")
dataset = hdf['data']
log_age_data = dataset["log10_age"]
ID_data = dataset["APOGEE_ID"]
SD_table = Table([ID_data, log_age_data], names=('apogee_id', 'log_age_data'))
hdu_list_1 = fits.open(data_file_1, memmap=True) #open fits file
apogee_data = Table(hdu_list_1[1].data) #Creates table from fits file
hdu_list_1.close() #Close the fits file
hdu_list_2 = fits.open(data_file_2, memmap=True) #open fits file
apokasc_data = Table(hdu_list_2[1].data) #Creates table from fits file
hdu_list_2.close() #Close the fits file
#Join tables together
full_table = join(apogee_data, SD_table)
#Define functions for the filter
def betw(x,l,u):
return (x>l)&(x<u)
def outs(x,l,u):
return (x<l)|(x>u)
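#Hypothetical illustration (not part of the original script): both helpers return boolean
#masks, e.g. betw(apogee_data['GALZ'], -1.0, 1.0) keeps stars within 1 kpc of the plane,
#while outs(apogee_data['LOGG'], 1.0, 3.5) keeps stars outside that surface-gravity range.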
#Define filter for apogee data, use guiding centre radius RL, galactic height GALZ, surface gravity LOGG
#Have 4 different filters and so on for linear age, lowess age, S&D age, APOKASC age - this extends to have disc stars
NaN_bit1 = (~pd.isna(apogee_data['rl']))&(~pd.isna(apogee_data['age_lowess_correct']))
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
    expected_index = pd.Index([1, 3], name='A')
# Copyright 2021 AI Singapore. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
import pandas as pd
from rarity.data_loader import CSVDataLoader, DataframeLoader
# add this in the conftest.py under tests folder
from selenium.webdriver.chrome.options import Options
def pytest_setup_options():
options = Options()
# added mainly for integration test in gitlab-ci to resolve
# (unknown error: DevToolsActivePort file doesn't exist)
# (The process started from chrome location /usr/bin/google-chrome is no longer running,
# so ChromeDriver is assuming that Chrome has crashed.)
# solution reference => https://github.com/plotly/dash/issues/1420
options.add_argument('--no-sandbox')
return options
@pytest.fixture
def csv_loader_single_modal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_single_modal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_reg():
SAMPLE_DATA_DIR = './tests/sample_data/regression/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'reg_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'reg_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Regression'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls():
SAMPLE_DATA_DIR = './tests/sample_data/classification/binary/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'binary_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'binary_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Binary-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def csv_loader_bimodal_cls_multi():
SAMPLE_DATA_DIR = './tests/sample_data/classification/multiclass/'
FEATURES_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_features.csv')
Y_TRUE_FILE = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yTrue.csv')
Y_PRED_FILE_1 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelA.csv')
Y_PRED_FILE_2 = os.path.join(SAMPLE_DATA_DIR, 'multiclass_yPreds_modelB.csv')
MODEL_NAMES = ['model_A', 'model_B']
ANALYSIS_TYPE = 'Multiclass-Classification'
data_loader = CSVDataLoader(FEATURES_FILE,
Y_TRUE_FILE,
yPred_file_ls=[Y_PRED_FILE_1, Y_PRED_FILE_2],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_reg():
DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6]], columns=['x1', 'x2', 'x3'])
DF_Y_TRUE = pd.DataFrame([[22.6], [36.6]], columns=['actual'])
DF_Y_PRED_1 = pd.DataFrame([[22.2], [35.0]], columns=['pred'])
MODEL_NAMES = ['model_A']
ANALYSIS_TYPE = 'Regression'
data_loader = DataframeLoader(DF_FEATURES,
DF_Y_TRUE,
df_yPred_ls=[DF_Y_PRED_1],
model_names_ls=MODEL_NAMES,
analysis_type=ANALYSIS_TYPE)
return data_loader
@pytest.fixture
def dataframe_loader_single_modal_cls():
    DF_FEATURES = pd.DataFrame([[0.1, 2.5, 3.6], [0.5, 2.2, 6.6], [0.3, 2.3, 5.2]], columns=['x1', 'x2', 'x3'])
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def clean_data(path):
data= pd.read_csv(path)
data_clean=data.dropna(axis=0)
data_clean = data_clean[data_clean['Year'] >= 1985]
data_clean = data_clean[data_clean['Year']<=2015]
return data_clean
def load_process(data,year):
data=data[data['Year']==year]
state_list=list(range(1,max(data['State'])+1))
column_names=['State','percenatge']
    df = pd.DataFrame(columns=column_names)
import inspect
import json
import logging
import random
import re
import sys
from collections import defaultdict
from contextlib import redirect_stdout
from datetime import datetime, timedelta
from io import StringIO
from itertools import product
from os import getenv
from os.path import dirname, realpath
from pathlib import Path
from string import Template
import click
import discord
import dunamai as _dunamai
import hupper
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytz
from dateutil.relativedelta import relativedelta
from humanize import naturaltime
from turnips.archipelago import Archipelago
from turnips.plots import plot_models_range
from yaml import load
try:
from yaml import CLoader as Loader
except ImportError: # pragma: no cover
from yaml import Loader
__version__ = _dunamai.get_version(
"turbot", third_choice=_dunamai.Version.from_any_vcs
).serialize()
matplotlib.use("Agg")
PACKAGE_ROOT = Path(dirname(realpath(__file__)))
RUNTIME_ROOT = Path(".")
# application configuration files
DEFAULT_CONFIG_TOKEN = RUNTIME_ROOT / "token.txt"
DEFAULT_CONFIG_CHANNELS = RUNTIME_ROOT / "channels.txt"
# static application asset data
DATA_DIR = PACKAGE_ROOT / "data"
STRINGS_DATA_FILE = DATA_DIR / "strings.yaml"
FOSSILS_DATA_FILE = DATA_DIR / "fossils.txt"
FISH_DATA_FILE = DATA_DIR / "fish.csv"
BUGS_DATA_FILE = DATA_DIR / "bugs.csv"
ART_DATA_FILE = DATA_DIR / "art.csv"
# persisted user and application data
DB_DIR = RUNTIME_ROOT / "db"
DEFAULT_DB_FOSSILS = DB_DIR / "fossils.csv"
DEFAULT_DB_PRICES = DB_DIR / "prices.csv"
DEFAULT_DB_ART = DB_DIR / "art.csv"
DEFAULT_DB_USERS = DB_DIR / "users.csv"
DEFAULT_DB_FISH = DB_DIR / "fish.csv"
# temporary application files
TMP_DIR = RUNTIME_ROOT / "tmp"
GRAPHCMD_FILE = TMP_DIR / "graphcmd.png"
LASTWEEKCMD_FILE = TMP_DIR / "lastweek.png"
with open(STRINGS_DATA_FILE) as f:
STRINGS = load(f, Loader=Loader)
FISH = pd.read_csv(FISH_DATA_FILE)
BUGS = pd.read_csv(BUGS_DATA_FILE)
ART = pd.read_csv(ART_DATA_FILE)
with open(FOSSILS_DATA_FILE) as f:
FOSSILS_SET = frozenset([line.strip().lower() for line in f.readlines()])
FISH_SET = frozenset(FISH.drop_duplicates(subset="name").name.tolist())
BUGS_SET = frozenset(BUGS.drop_duplicates(subset="name").name.tolist())
ART_SET = frozenset(ART.drop_duplicates(subset="name").name.tolist())
COLLECTABLE_SET = FOSSILS_SET | FISH_SET | BUGS_SET | ART_SET
EMBED_LIMIT = 5 # more embeds in a row than this causes issues
USER_PREFRENCES = [
"hemisphere",
"timezone",
"island",
"friend",
"fruit",
"nickname",
"creator",
]
# Based on values from datetime.isoweekday()
DAYS = {
"monday": 1,
"tuesday": 2,
"wednesday": 3,
"thursday": 4,
"friday": 5,
"saturday": 6,
"sunday": 7,
}
IDAYS = dict(map(reversed, DAYS.items()))
class Validate:
FRUITS = ["apple", "cherry", "orange", "peach", "pear"]
HEMISPHERES = ["northern", "southern"]
@classmethod
def friend(cls, value):
code = re.sub("[^0-9]", "", value)
return code if len(code) == 12 and code.isdigit() else None
@classmethod
def creator(cls, value):
code = re.sub("[^0-9]", "", value)
return code if len(code) == 12 and code.isdigit() else None
@classmethod
def fruit(cls, value):
fruit = value.lower()
return fruit if fruit in cls.FRUITS else None
@classmethod
def hemisphere(cls, value):
home = value.lower()
return home if home in cls.HEMISPHERES else None
@classmethod
def nickname(cls, value):
return value
@classmethod
def timezone(cls, value):
return value if value in pytz.all_timezones_set else None
@classmethod
def island(cls, value):
return value
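# Hypothetical illustration (not part of the original bot): each validator returns a
# normalized value or None, e.g. Validate.friend("SW-1234-5678-9012") -> "123456789012",
# while Validate.fruit("durian") -> None because it is not in Validate.FRUITS.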
def s(key, **kwargs):
    """Returns a string from data/strings.yaml with substitutions."""
data = STRINGS.get(key, "")
assert data, f"error: missing strings key: {key}"
return Template(data).substitute(kwargs)
def h(dt):
    """Converts a datetime to something readable by a human."""
if hasattr(dt, "tz_convert"): # pandas-datetime-like objects
dt = dt.to_pydatetime()
naive_dt = dt.replace(tzinfo=None)
return naturaltime(naive_dt)
def day_and_time(dt):
"""Converts a datetime to a day and time of day, eg: Monday pm."""
day = IDAYS[dt.isoweekday()]
am_pm = "am" if dt.hour < 12 else "pm"
return f"{day.title()} {am_pm}"
def humanize_months(row):
"""Generator that humanizes months from row data where each month is a column."""
ABBR = {
0: "Jan",
1: "Feb",
2: "Mar",
3: "Apr",
4: "May",
5: "Jun",
6: "Jul",
7: "Aug",
8: "Sep",
9: "Oct",
10: "Nov",
11: "Dec",
}
months = [
row["jan"],
row["feb"],
row["mar"],
row["apr"],
row["may"],
row["jun"],
row["jul"],
row["aug"],
row["sep"],
row["oct"],
row["nov"],
row["dec"],
]
start = None
for m, inc in enumerate(months):
if inc and start is None:
start = m # start of a range
elif not inc and start is None:
continue # range hasn't started yet
elif inc and start is not None:
continue # continuance of a range
else:
lhs = ABBR[start]
rhs = ABBR[m - 1]
if lhs != rhs:
yield f"{lhs} - {rhs}" # previous element ended a range
else:
yield f"{lhs}" # captures a lone element
start = None
if start == 0:
yield "the entire year" # capture total range
elif start is not None:
lhs = ABBR[start]
rhs = ABBR[11]
if lhs != rhs:
yield f"{lhs} - {rhs}" # capture a trailing range
else:
yield f"{lhs}" # captures a trailing lone element
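# Hypothetical example (not part of the original module) showing the expected output:
# a row where only mar-may and oct are truthy yields ["Mar - May", "Oct"].
def example_humanize_months():
    row = {m: False for m in ("jan", "feb", "mar", "apr", "may", "jun",
                              "jul", "aug", "sep", "oct", "nov", "dec")}
    row.update({"mar": True, "apr": True, "may": True, "oct": True})
    return list(humanize_months(row))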
def discord_user_from_name(channel, name):
"""Returns the discord user from the given channel and name."""
if name is None:
return None
lname = name.lower()
members = channel.members
return next(filter(lambda member: lname in str(member).lower(), members), None)
def discord_user_from_id(channel, user_id):
"""Returns the discord user from the given channel and user id."""
if user_id is None:
return None
iid = int(user_id)
members = channel.members
return next(filter(lambda member: iid == member.id, members), None)
def discord_user_name(channel, name_or_id):
"""Returns the discord user name from the given channel and name or id."""
if not name_or_id:
return None
user = (
discord_user_from_id(channel, name_or_id)
if isinstance(name_or_id, int) or name_or_id.isdigit()
else discord_user_from_name(channel, name_or_id)
)
return str(user) if user else None
def discord_user_id(channel, name):
"""Returns the discord user id name from the given channel and name."""
if not name:
return None
return getattr(discord_user_from_name(channel, name), "id", None)
def is_turbot_admin(channel, user_or_member):
"""Checks to see if given user or member has the Turbot Admin role on this server."""
member = (
user_or_member
if hasattr(user_or_member, "roles") # members have a roles property
else channel.guild.get_member(user_or_member.id) # but users don't
)
return any(role.name == "Turbot Admin" for role in member.roles) if member else False
def command(f):
f.is_command = True
return f
class Turbot(discord.Client):
"""Discord turnip bot"""
def __init__(
self,
token="",
channels=[],
prices_file=DEFAULT_DB_PRICES,
art_file=DEFAULT_DB_ART,
fish_file=DEFAULT_DB_FISH,
fossils_file=DEFAULT_DB_FOSSILS,
users_file=DEFAULT_DB_USERS,
log_level=None,
):
if log_level: # pragma: no cover
logging.basicConfig(level=log_level)
super().__init__()
self.token = token
self.channels = channels
self.prices_file = prices_file
self.art_file = art_file
self.fish_file = fish_file
self.fossils_file = fossils_file
self.users_file = users_file
self.base_prophet_url = "https://turnipprophet.io/?prices=" # TODO: configurable?
self._prices_data = None # do not use directly, load it from load_prices()
self._art_data = None # do not use directly, load it from load_art()
self._fish_data = None # do not use directly, load it from load_fish()
self._fossils_data = None # do not use directly, load it from load_fossils()
self._users_data = None # do not use directly, load it from load_users()
self._last_backup_filename = None
# build a list of commands supported by this bot by fetching @command methods
members = inspect.getmembers(self, predicate=inspect.ismethod)
self._commands = [
member[0]
for member in members
if hasattr(member[1], "is_command") and member[1].is_command
]
def run(self): # pragma: no cover
super().run(self.token)
def save_prices(self, data):
"""Saves the given prices data to csv file."""
data.to_csv(self.prices_file, index=False) # persist to disk
self._prices_data = data # in-memory optimization
def last_backup_filename(self):
"""Return the name of the last known backup file for prices or None if unknown."""
return self._last_backup_filename
def backup_prices(self, data):
"""Backs up the prices data to a datetime stamped file."""
filename = datetime.now(pytz.utc).strftime(
"prices-%Y-%m-%d.csv" # TODO: configurable?
)
filepath = Path(self.prices_file).parent / filename
self._last_backup_filename = filepath
data.to_csv(filepath, index=False)
def load_prices(self):
"""Loads up and returns the application price data as a DataFrame."""
if self._prices_data is not None:
return self._prices_data
cols = ["author", "kind", "price", "timestamp"]
dtypes = ["int64", "object", "int64", "datetime64[ns, UTC]"]
if Path(self.prices_file).exists():
self._prices_data = pd.read_csv(
self.prices_file, names=cols, parse_dates=True, skiprows=1
)
else:
self._prices_data = pd.read_csv(
StringIO(""), names=cols, dtype=dict(zip(cols, dtypes))
)
self._prices_data = self._prices_data.astype(dict(zip(cols, dtypes)))
return self._prices_data
def save_users(self, data):
"""Saves the given users data to csv file."""
data.to_csv(self.users_file, index=False) # persist to disk
self._users_data = data # in-memory optimization
def load_users(self):
"""Returns a DataFrame of user data or creates an empty one."""
if self._users_data is not None:
self._users_data = self._users_data.fillna("")
return self._users_data
cols = ["author", *USER_PREFRENCES]
dtypes = ["int64", "str", "str", "str", "str", "str", "str", "str"]
if Path(self.users_file).exists():
self._users_data = pd.read_csv(self.users_file, names=cols, skiprows=1)
else:
self._users_data = pd.read_csv(
StringIO(""), names=cols, dtype=dict(zip(cols, dtypes))
)
self._users_data = self._users_data.fillna("")
self._users_data = self._users_data.astype(dict(zip(cols, dtypes)))
return self._users_data
def save_art(self, data):
"""Saves the given art data to csv file."""
data.to_csv(self.art_file, index=False) # persist to disk
self._art_data = data # in-memory optimization
def load_art(self):
"""Returns a DataFrame of art data or creates an empty one."""
if self._art_data is None:
try:
self._art_data = pd.read_csv(self.art_file)
except FileNotFoundError:
self._art_data = pd.DataFrame(columns=["author", "name"])
return self._art_data
def save_fish(self, data):
"""Saves the given fish data to csv file."""
data.to_csv(self.fish_file, index=False) # persist to disk
self._fish_data = data # in-memory optimization
def load_fish(self):
"""Returns a DataFrame of fish data or creates an empty one."""
if self._fish_data is None:
try:
self._fish_data = pd.read_csv(self.fish_file)
except FileNotFoundError:
                self._fish_data = pd.DataFrame(columns=["author", "name"])
import os
from datetime import datetime
import pandas as pd
import src.config.constants as constants
import src.munging as process_data
import src.common as common
import src.modeling as train_util
if __name__ == "__main__":
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
logger = common.get_logger("blend")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger,
constants.PROCESSED_DATA_DIR,
train=True,
test=True,
sample_submission=True,
)
# https://www.kaggle.com/xiaoxiaoxiaoxiaoxiao/averaging-top-5-solutions
# PL: 0.81856
df_1 = pd.read_csv(
f"{constants.PUB_SUBMISSION_DIR}/averaging-top-5-solutions/submission.csv"
)
# stacking_lgb_xbg_cat_imputer_no_imputer
# PL: 0.81803
df_2 = pd.read_csv(
f"{constants.SUBMISSION_DIR}/sub_stacking_lgb_xbg_cat_imputer_no_imputer_v2_0929_1549_0.81680.gz"
)
    merged_df = pd.merge(df_1, df_2, how="left", on="id")
# This module decodes the checkpoint
# files and returns the output according
# to the desired output format
import os
import os.path
import configparser
import struct
import re
import sys
import csv
import time
from itertools import zip_longest
import numpy as np
import pandas as pd
import h5py
# Error codes
# 1001: 'Unknown level'
# 1002: 'Invalid output format'
# 1003: 'Missing totalRanks'
# 2001: 'Config file not found'
# 2002: 'Checkpoint file not found'
# 2003: 'Checkpoint file empty'
# 2004: 'Meta file not found'
# 2005: 'Failed to recover MPI-IO file'
# 3001: 'No variable found in checkpoint'
# 3002: 'Error while writing data to CSV'
# 3003: 'Unavailable mapping of long double for decoding'
# 3004: 'Unsupported decoding for self-defined variable'
# 3005: 'Unsupported decoding for given data type'
nbVars = 0
ckpt_file_size = 0
varheaders = [] # var headers
# variable object
class variable(object):
def __init__(self, var_id, var_size, var_typeid,
var_typesize, var_position,
var_name=None, var_ndims=None, var_dims=None):
self.var_id = var_id
self.var_size = var_size
self.var_typeid = var_typeid
self.var_typesize = var_typesize
self.var_position = var_position
if var_name is not None:
self.var_name = var_name
if var_ndims is not None:
self.var_ndims = var_ndims
if var_dims is not None:
self.var_dims = var_dims
# This function reads the given meta data
# and returns a list of the variables found
# in the ckpt file
def read_meta(meta_file, ckpt_file, group_size, level):
ckpt_file = ckpt_file.rsplit('/', 1)[1]
mysection = ""
data = []
# count nbVars from meta_file
regex = "var[-0-9]+_id"
var_pattern = re.compile(regex)
# parse and get value by key
print("reading meta file:", meta_file)
config = configparser.ConfigParser()
config.read(meta_file)
# get nbVars
global nbVars
nbVars = 0 # initialize it for every meta file
if level == 4:
# For level 4, ckpt file name does not match ckpt_file_name's value
for section in config.sections(): # traverse sections
# read from the first section with a digit as a key
if section.isdigit() is True:
mysection = section
else: # for any level
for section in config.sections(): # traverse sections
if section.isdigit() is True:
if config[section]['ckpt_file_name'] == ckpt_file:
mysection = section
break
for (each_key, each_val) in config.items(mysection):
# check var pattern to increment nbVars variable
if var_pattern.match(each_key) and each_key.endswith('_id'):
nbVars = nbVars + 1
if nbVars == 0:
print("No variable found in Checkpoint file")
sys.exit(3001)
print("Number of variables to read = "+str(nbVars))
# create numpy array for variables (instead of data)
datanumpy = np.array([])
# get data for each Var
# for i in range(int(group_size)):
for j in range(nbVars):
var_id = config['0']['var'+str(j)+'_id']
var_size = config['0']['var'+str(j)+'_size']
var_typeid = config['0']['var'+str(j)+'_typeid']
var_typesize = config['0']['var'+str(j)+'_typesize']
var_position = config['0']['var'+str(j)+'_pos']
var_name = None
var_ndims = 0
var_dims = []
if (config.has_option('0', 'var'+str(j)+'_name') is True and
config['0']['var'+str(j)+'_name']):
var_name = config['0']['var'+str(j)+'_name']
if config.has_option('0', 'var'+str(j)+'_ndims') is True:
# if variable dims set by FTI_SetAttribute()
var_ndims = int(config['0']['var'+str(j)+'_ndims'])
if var_ndims != 0:
for k in range(var_ndims):
dim = config['0']['var'+str(j)+'_dim'+str(k)]
var_dims.append(dim)
datanumpy = np.append(datanumpy, variable(var_id, var_size,
var_typeid, var_typesize,
var_position, var_name,
var_ndims, var_dims))
return datanumpy
# This function reads the ckpt file
# and saves its content to out.csv
def read_checkpoint(ckpt_file, meta_file,
config_file, group_size,
level, output):
if os.path.exists(ckpt_file) is False: # FileNotFoundError
print("No checkpoint file found")
else:
if os.stat(ckpt_file).st_size == 0:
print("Checkpoint file empty")
sys.exit(2003)
else:
print(
"Found checkpoint file with size ",
os.path.getsize(ckpt_file))
file = open(ckpt_file, "rb")
# read meta data
data = read_meta(meta_file, ckpt_file, group_size, level)
# read Checkpoint
allvarsnumpy = np.empty((1, nbVars), dtype=object)
for i in range(nbVars):
# for each variable: create list per variable to hold
# the value of the variable to be exported to the csv file
file.seek(int(data[i].var_position), os.SEEK_SET)
var = file.read(int(data[i].var_size))
# process the datatype
if int(data[i].var_typeid) == -1:
print("skipping var#", str(i))
print("Not a primitive data type")
# skip this variable
# only primitive types are decoded as of this version
continue
decode_pattern, dtype = decode_fti_type(data[i].var_typeid)
# data[i].var_ndims already has data
# if var has no dimension:: one element
data[i].var_ndims = int(data[i].var_ndims)
# should verify if dimensions are correct
if (int(data[i].var_size) == int(data[i].var_typesize)
and data[i].var_ndims == 0):
# single var
decoded_var = struct.unpack(decode_pattern, var)
varnumpy = np.array([])
varnumpy = np.append(varnumpy, decoded_var)
else: # multi-dim variable
subvars = int(data[i].var_size) \
// (int(data[i].var_typesize))
decode_pattern = str(subvars)+decode_pattern
decoded_var = struct.unpack(decode_pattern, var)
varnumpy = np.array([])
# for v in range(subvars):
varnumpy = np.append(varnumpy, decoded_var) # needs debugging
if hasattr(data[i], 'var_name'):
varheaders.append(data[i].var_name)
else:
varheaders.append("var#"+str(i))
allvarsnumpy[0, i] = varnumpy
file.close()
allvarsnumpy = allvarsnumpy[0]
if output == 'CSV':
write_data_to_csv(allvarsnumpy, varheaders)
elif output == 'HDF5':
write_data_to_hdf5(allvarsnumpy, varheaders)
elif output == 'data':
return allvarsnumpy
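# Hypothetical illustration (not part of the original tool): a multi-element variable is
# decoded exactly like the struct.unpack call above, e.g. a 32-byte buffer of doubles
# (typesize 8) is read with the pattern "4d".
def example_unpack_doubles(raw_bytes):
    return struct.unpack("4d", raw_bytes)  # assumes raw_bytes holds exactly four doubles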
# This function writes the variables
# stored in a numpy array to the ouput csv file
def write_data_to_csv(allvarsnumpy, varheaders):
    panda = pd.DataFrame()
import pandas as pd
from skmultilearn.model_selection.iterative_stratification import IterativeStratification
from sklearn.model_selection import train_test_split
from pynet.utils import get_pickle_obj
import matplotlib.pyplot as plt
import numpy as np
import os, copy
## This script defines the Train/Val/Test scheme for the release of the OpenBHB dataset.
## It also covers the BHB extension with private datasets. For now, only the Train/Val/Test
## scheme is implemented, with Val/(Test-Intra + Test-Inter) preserved as before.
def discretize_continous_label(labels, bins='sturges', verbose=False):
# Get an estimation of the best bin edges. 'Sturges' is conservative for pretty large datasets (N>1000).
bin_edges = np.histogram_bin_edges(labels, bins=bins)
if verbose:
print('Global histogram:\n', np.histogram(labels, bins=bin_edges, density=False), flush=True)
# Discretizes the values according to these bins
discretization = np.digitize(labels, bin_edges[1:], right=True)
if verbose:
print('Bin Counts after discretization:\n', np.bincount(discretization), flush=True)
return discretization
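# Hypothetical example (not part of the original script): ages drawn uniformly in [6, 90]
# are mapped to integer bin indices that can then be fed to the stratifier defined below.
def example_discretize_ages(n=1000, seed=0):
    ages = np.random.default_rng(seed).uniform(6, 90, size=n)
    return discretize_continous_label(ages, bins='sturges', verbose=True)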
def get_stratification_split(labels, n_test=0.1, preserve:str=None, seed:int=None):
np.random.seed(seed)
dummy = np.arange(len(labels)).reshape(len(labels), -1)
n_splits = int(1/n_test)
stratifier = IterativeStratification(n_splits=n_splits, order=1)
# iterate until we find train/test split preserving the variable "preserve"
for _ in range(n_splits):
train, test = next(stratifier.split(dummy, labels.values))
if preserve is None or set(labels.iloc[train][preserve]) == set(labels.iloc[test][preserve]) \
== set(labels[preserve]):
return (train, test)
print("Impossible to stratify by preserving %s"%preserve)
return None
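# Hypothetical usage (illustrative only; the column names below are made up): stratify on a
# discretized age bin plus acquisition site, keeping every site present in both splits.
#   labels = participants[["site"]].assign(age_bin=discretize_continous_label(participants["age"]))
#   train_idx, test_idx = get_stratification_split(labels, n_test=0.1, preserve="site", seed=0)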
### Defines the splits used for OpenBHB-10K
path = "/neurospin/hc/openBHB/participants.tsv"
test_path = ["/neurospin/psy_sbox/icbm/ICBM_t1mri_mwp1_participants.csv",
"/neurospin/psy_sbox/hcp_development/participants.csv"]
open_bhb10k = ['abide1', 'abide2', 'ixi', 'npc', 'rbp', 'gsp', 'localizer', 'mpi-leipzig', 'corr', 'nar']
df = pd.read_csv(path, sep="\t")
#!/usr/bin/env python
import numpy as np
import pandas as pd
import pytest
from modnet.preprocessing import get_cross_nmi
from modnet.preprocessing import nmi_target
def test_nmi_target():
# Test with linear data (should get 1.0 mutual information, or very close due to algorithm used
# in mutual_info_regression)
npoints = 31
x = np.linspace(0.5, 3.5, npoints)
y = 2*x - 2
z = 4*x + 2
df_feat = pd.DataFrame({'x': x, 'y': y})
df_target = pd.DataFrame({'z': z})
# Here we fix the number of neighbors for the call to sklearn.feature_selection's mutual_info_regression to 2 so
# that we get exactly 1 for the mutual information.
df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, n_neighbors=2)
assert df_nmi_target.shape == (2, 1)
assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
# Same data shuffled
# Shuffle the x, y and z
indices = np.arange(npoints)
np.random.seed(42)
np.random.shuffle(indices)
xs = x.take(indices)
ys = y.take(indices)
zs = z.take(indices)
df_feat = pd.DataFrame({'x': xs, 'y': ys})
df_target = pd.DataFrame({'z': zs})
df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, n_neighbors=2)
assert df_nmi_target.shape == (2, 1)
assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
# Test with one constant feature
c = np.ones(npoints) * 1.4
df_feat = pd.DataFrame({'x': x, 'y': y, 'c': c})
df_target = pd.DataFrame({'z': z})
df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, n_neighbors=2)
assert df_nmi_target.shape == (2, 1)
assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, drop_constant_features=False, n_neighbors=2)
assert df_nmi_target.shape == (3, 1)
assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
assert df_nmi_target.loc['c']['z'] == pytest.approx(0.0)
# Test with unrelated data (grid)
x = np.linspace(start=2, stop=5, num=4)
z = np.linspace(start=3, stop=7, num=5)
x, z = np.meshgrid(x, z)
x = x.flatten()
z = z.flatten()
df_feat = pd.DataFrame({'x': x})
df_target = pd.DataFrame({'z': z})
df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target)
assert df_nmi_target.shape == (1, 1)
assert df_nmi_target.loc['x']['z'] == pytest.approx(0.0)
# Test initial checks
# Incompatible shapes
x = np.linspace(start=2, stop=3, num=5)
z = np.linspace(start=2, stop=3, num=8)
df_feat = pd.DataFrame({'x': x})
df_target = pd.DataFrame({'z': z})
with pytest.raises(ValueError, match=r'The input features DataFrame and the target variable DataFrame '
r'should contain the same number of data points.'):
nmi_target(df_feat=df_feat, df_target=df_target)
# Target DataFrame does not have exactly one column
x = np.linspace(start=2, stop=3, num=5)
z = np.linspace(start=2, stop=3, num=5)
df_feat = pd.DataFrame({'x': x})
df_target = pd.DataFrame({'z2': z, 'z': z})
with pytest.raises(ValueError, match=r'The target DataFrame should have exactly one column.'):
nmi_target(df_feat=df_feat, df_target=df_target)
# Test with some more real data (for which NMI is not just 0.0 or 1.0)
npoints = 200
np.random.seed(42)
x = np.random.rand(npoints)
z = 4 * x + 1.0 * np.random.rand(npoints)
df_feat = pd.DataFrame({'x': x})
    df_target = pd.DataFrame({'z': z})
"""
Calculate various stats regarding how associated a context token is with contentiousness.
process:
* load the annotation set (controls already removed):
* remove 'weet ik niet' and 'Onleesbare...' annotations
* case-fold target words
* With respect to the text-analysed context tokens
* build a hash of {context token|str| : co-occurrent extract ids|set|, ...}
* build a hash of {context token|str| : Counter{target1: 2, target2:1,... }, ...}
* build and save a dataframe with:
      * e.g., ratios of count(token, contentious) / count(token) and a binomial test,
        to measure how strongly a token is associated with the contentious
        or the non-contentious sample
"""
import os
from collections import Counter, defaultdict
import pandas as pd
from scipy.stats import binom_test
from statsmodels.stats.proportion import proportion_confint
from tqdm import tqdm
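# Hypothetical helper (not part of the original script) sketching the association test
# described in the module docstring: the share of a token's extracts voted contentious,
# plus a one-sided binomial test against the corpus-wide contentious rate used below.
def example_token_association(votes, base_rate=0.183):
    count_c = sum(1 for v in votes if v == 1)
    return count_c / len(votes), binom_test(count_c, len(votes), p=base_rate, alternative="greater")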
def main():
sav = "stats/associations.csv"
os.makedirs(os.path.dirname(sav), exist_ok=True)
    # pass if sav already exists
if os.path.exists(sav):
pass
else:
# load data
with open("data.csv", "r") as f:
data = pd.read_csv(f)
# remove all 'weet ik niet' and 'Onleesbare ...'
data = data[
data["response"].isin(
["Omstreden naar huidige maatstaven", "Niet omstreden"]
)
]
# case fold target_compound
data['target_compound'] = data['target_compound'].str.lower()
# get a series of text-analysed contexts by extract_id
contexts: pd.Series = (
data.groupby(["extract_id"]).first().loc[:, "text_analysed"]
)
# load dataframe of | extract_id | label |
with open("majority_vote/majority_vote.csv", "r") as f:
majority_vote = pd.read_csv(f)
majority_vote = majority_vote.set_index("extract_id")
# build {context token: list of extract_ids}
extracts_by_context_token = defaultdict(list)
for extract_id, text in contexts.iteritems():
for context_token in set(
[
t
for s in text.split("<sent>")
for t in s.split(" ")
if t != "" and t != " "
]
):
extracts_by_context_token[context_token].append(extract_id)
# build {context token: list of associated target words}
target_words_by_id = (
data.groupby(["extract_id"]).first().loc[:, "target_compound"]
)
target_words_by_context_token = defaultdict(Counter) #
for context_token, extract_ids in extracts_by_context_token.items():
for extract_id in extract_ids:
target = target_words_by_id.at[extract_id]
target_words_by_context_token[context_token][target] += 1
# ------
# build df of statistics
# ------
stats = defaultdict(list)
for context_token, extract_ids in extracts_by_context_token.items():
votes = [
majority_vote.at[extract_id, "label"] for extract_id in extract_ids
]
stats["token"].append(context_token)
stats["num_corresponding_extracts"].append(len(votes))
count_c = sum([1 for v in votes if v == 1])
count_n = sum([1 for v in votes if v == 0])
count_u = sum([1 for v in votes if v == 0.5])
count = len(votes)
# attribute point estimates and p-values
stats["proportion_with_contentious"].append((count_c) / (count))
stats["contentious_p_value"].append(
binom_test(count_c, count, p=0.183, alternative="greater")
)
stats["proportion_with_non_contentious"].append((count_n) / (count))
stats["non_contentious_p_value"].append(
binom_test(count_n, count, p=0.796, alternative="greater")
)
stats["proportion_with_no_majority"].append((count_u) / (count))
            # attribute number of target words and number of instances
targets = target_words_by_context_token[context_token]
target_string = ", ".join(
[t + f"({count})" for t, count in targets.items()]
)
stats['targets_count'].append(sum([1 for t, c in targets.items()]))
stats["targets"].append(target_string)
# save as a dataframe
        stats = pd.DataFrame.from_dict(stats)
import tensorflow.keras as kr
# Numerical arrays
import numpy as np
# Data frames.
import pandas as pd
# Plotting
import matplotlib.pyplot as plt
# this code all explained in jupyter notebook
def load_model():
data = pd.read_csv('power_production.csv')
    # drop rows where the power output is zero even though wind speed > 10
# https://stackoverflow.com/questions/52456874/drop-rows-on-multiple-conditions-in-pandas-dataframe
df_new = data.drop(data[(data['speed'] > 10.0) & (data['power'] == 0.0)].index)
    train = pd.DataFrame()
"""
Functions and classes for performing WoE transformations
"""
import math
import warnings
import numpy as np
import pandas as pd
import sklearn as sk
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm
class _GroupedPredictor(pd.DataFrame):
"""
    Helper class for convenient access to some of the data
"""
def get_predictor(self, x):
"""
        Get a subset of rows by predictor name(s)
        Parameters
        ---------------
        x : str/int/list-like
            Predictor or list of predictors
        Returns:
        -----------
        self : pd.DataFrame
            Part of the dataframe (of itself)
"""
if isinstance(x, (list, set, tuple)):
return self[self["predictor"].isin(x)]
else:
return self[self["predictor"] == x]
def append(self, other):
return _GroupedPredictor(super().append(other))
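# Hypothetical illustration (not part of the original module): if `grouped` is a
# _GroupedPredictor with a "predictor" column, grouped.get_predictor(["age", "income"])
# returns only the rows belonging to those two predictors.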
class WoeTransformer(TransformerMixin, BaseEstimator):
    """Class for building and applying a WOE grouping to a dataset
    Parameters
    ----------
    min_sample_rate : float, default 0.05
        Minimum group size (share of the total sample)
    min_count : int, default 3
        Minimum number of observations of each class in a group
    save_data : bool, default False
        Whether to keep the training data inside the class instance
    join_bad_categories : bool, default False
        Whether the transformer should attempt to join categorical
        groups into larger ones
    Warning
    -------
    join_bad_categories - Experimental feature.
    The way categories are grouped is unstable
    Attributes
    ----------
    stats : pandas.DataFrame
        Results of the WOE grouping for all predictors
    predictors : list
        List of predictors the grouping was built on
    cat_values : dict[str, list]
        Dictionary with lists of categories per predictor, passed at fit time
    alpha_values : dict[str, float]
        Dictionary with alpha values for group regularization
    possible groups : pandas.DataFrame
        Data on predictor values that could have become
        separate categories
    bad_groups : pandas.DataFrame
        Data on groups that do not satisfy the conditions
"""
def __repr__(self):
return "WoeTransformer(min_sample_rate={!r}, min_count={!r}, n_fitted_predictors={!r})".format(
self.min_sample_rate,
self.min_count,
len(self.predictors),
)
def __init__(
self,
min_sample_rate: float = 0.05,
min_count: int = 3,
save_data: bool = False,
join_bad_categories: bool = False,
):
"""
        Initialize the class instance
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
self.alpha_values = {}
self.save_data = save_data
self.join_bad_categories = join_bad_categories
# -------------------------
    # Class interface functions
# -------------------------
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
        Fit the transformer and compute all intermediate data
        Parameters
        ---------------
        X : pd.DataFrame
            Dataframe with the predictors to be grouped
        y : pd.Series
            Target variable
        cat_values : dict[str, list[str]], optional
            Dictionary of lists with special values that should be
            put into separate categories
            By default, all string and missing values
            are put into separate categories
        alpha_values : dict[str, float], optional
            Dictionary with alpha values for WOE group regularization
        Returns
        -------
        self : WoeTransformer
"""
        # Reset the transformer's current state
self._reset_state()
        # Store the categorical values
self.cat_values = cat_values
        # Validate and reshape the data
if hasattr(self, "_validate_data"):
X, y = self._validate_and_convert_data(X, y)
if self.save_data:
self.data = X
self.target = y
        # Initialize the coefficients for group regularization
self.alpha_values = {i: 0 for i in X.columns}
self.alpha_values.update(alpha_values)
        # Aggregate predictor values
self._grouping(X, y)
        # Compute WOE and IV
self._fit_numeric(X, y)
        # Find potential groups
        # Find "bad" groups
self._get_bad_groups()
return self
def transform(self, X, y=None):
"""
        Apply the fitted transformer to new data
        Parameters
        ---------------
        X : pandas.DataFrame
            Dataframe to be transformed
            Predictors that were not grouped before will be
            ignored and a message will be printed
        y : pandas.Series
            Ignored
        Returns
        -----------
        transformed : pandas.DataFrame
            Transformed dataset
"""
transformed = pd.DataFrame()
if hasattr(self, "_validate_data"):
try:
X, y = self._validate_and_convert_data(X, y)
except AttributeError:
pass
for i in X:
if i in self.predictors:
try:
transformed[i] = self._transform_single(X[i])
except Exception as e:
print(f"Transform failed on predictor: {i}", e)
else:
print(f"Column is not in fitted predictors list: {i}")
return transformed
def fit_transform(self, X, y, cat_values={}, alpha_values={}):
"""
        Fit the transformer and compute all intermediate data,
        then apply the grouping to the same data
        Parameters
        ---------------
        X : pandas.DataFrame
            Dataframe with the predictors to be grouped
        y : pandas.Series
            Target variable
        cat_values : dict[str, list[str]], optional
            Dictionary of lists with special values that should be
            put into separate categories
            By default, all string and missing values
            are put into separate categories
        alpha_values : dict[str, float], optional
            Dictionary with alpha values for WOE group regularization
        Returns
        -----------
        transformed : pd.DataFrame
            Transformed dataset
"""
self.fit(X, y, cat_values=cat_values, alpha_values=alpha_values)
return self.transform(X)
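    # Hypothetical usage sketch (illustrative only, not part of the original class):
    #   wt = WoeTransformer(min_sample_rate=0.05, min_count=3)
    #   X_woe = wt.fit_transform(X_train, y_train, cat_values={"city": ["unknown"]})
    #   wt.get_iv(sort=True)  # information value of each fitted predictor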
def plot_woe(self, predictors=None):
"""
        Plot one or several grouping charts
        Parameters
        ---------------
        predictors : str or array, default None
            Predictor(s) to plot
            -- if str - a single chart is drawn
            -- if array - charts are drawn for the listed predictors
            -- if None - charts are drawn for all grouped predictors
        Warning
        -------
        Calling the method without arguments may take a long time when there
        are many predictors
"""
if predictors is None:
predictors = self.predictors
elif isinstance(predictors, str):
predictors = [predictors]
elif isinstance(predictors, (list, tuple, set)):
predictors = predictors
_, axes = plt.subplots(figsize=(10, len(predictors) * 5), nrows=len(predictors))
try:
for i, col in enumerate(predictors):
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes[i])
except TypeError:
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes)
# return fig
def get_iv(self, sort=False):
"""Получение списка значений IV по предикторам
Parameters
----------
sort : bool, default False
Включает сортировку результата по убыванию IV
Returns
-------
        dict
"""
try:
res = self.stats.groupby("predictor")["IV"].sum()
if sort:
res = res.sort_values(ascending=False)
res = dict(res)
except AttributeError as e:
print(f"Transformer was not fitted yet. {e}")
res = {}
return res
# -------------------------
# Внутренние функции над всем датасетом
# -------------------------
    def _validate_and_convert_data(self, X, y):
        """Проверяет входные данные, трансформирует в объекты pandas
Использует метод _validate_data из sklearn/base.py
"""
if hasattr(X, "columns"):
predictors = X.columns
else:
predictors = ["X" + str(i + 1) for i in range(X.shape[1])]
if y is None:
X_valid = self._validate_data(X, y, dtype=None, force_all_finite=False)
X_valid = pd.DataFrame(X, columns=predictors)
y_valid = None
else:
X_valid, y_valid = self._validate_data(
X, y, dtype=None, force_all_finite=False
)
y_valid = pd.Series(y, name="target")
X_valid = pd.DataFrame(X, columns=predictors)
return X_valid, y_valid
def _grouping(self, X, y):
"""
Применение группировки ко всем предикторам
"""
df = X.copy()
df = df.fillna("пусто")
df["target"] = y.copy()
# Группировка и расчет показателей
for col in df.columns[:-1]:
grouped_temp = self._group_single(df[col], y)
num_mask = self._get_nums_mask(grouped_temp["value"])
cat_val_mask = grouped_temp["value"].isin(self.cat_values.get(col, []))
is_all_categorical = all(~num_mask | cat_val_mask)
if self.join_bad_categories and is_all_categorical:
repl = self._get_cat_values_for_join(grouped_temp)
grouped_temp = self._group_single(df[col].replace(repl), y)
self.grouped = self.grouped.append(grouped_temp)
# Замена пустых значений обратно на np.nan ИЛИ преобразование в числовой тип
try:
self.grouped["value"] = self.grouped["value"].replace({"пусто": np.nan})
except TypeError:
self.grouped["value"] = pd.to_numeric(
self.grouped["value"], downcast="signed"
)
def _fit_numeric(self, X, y):
"""
Расчет WOE и IV
Parameters:
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
Returns
-------
None
"""
res = pd.DataFrame()
for i in X:
res_i = self._fit_single(X[i], y)
res = res.append(res_i)
self.predictors.append(i)
self.stats = self.stats.append(res)
# -------------------------
# Внутренние функции над отдельными столбцами
# -------------------------
def _group_single(self, x, y):
"""
Агрегация данных по значениям предиктора.
Рассчитывает количество наблюдений,
количество целевых событий, долю группы от общего числа наблюдений
и долю целевых в группе
Parameters:
---------------
X : pandas.DataFrame
Таблица данных для агрегации
y : pandas.Series
Целевая переменная
"""
col = x.name
df = pd.DataFrame({col: x.values, "target": y.values})
grouped_temp = df.groupby(col)["target"].agg(["count", "sum"]).reset_index()
grouped_temp.columns = ["value", "sample_count", "target_count"]
grouped_temp["sample_rate"] = (
grouped_temp["sample_count"] / grouped_temp["sample_count"].sum()
)
grouped_temp["target_rate"] = (
grouped_temp["target_count"] / grouped_temp["sample_count"]
)
grouped_temp.insert(0, "predictor", col)
return _GroupedPredictor(grouped_temp)
def _fit_single(self, x, y, gr_subset=None, cat_vals=None):
"""
Расчет WOE и IV
Parameters:
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
gr_subset : _GroupedPredictor
Предиктор
"""
gr_subset_num = pd.DataFrame()
gr_subset_cat = pd.DataFrame()
col = x.name
if gr_subset is None:
gr_subset = self.grouped.get_predictor(col)
if cat_vals is None:
cat_vals = self.cat_values.get(col, [])
nan_mask = x.isna()
num_mask = self._get_nums_mask(x) & (~x.isin(cat_vals)) & (~nan_mask)
num_vals = x.loc[num_mask].unique()
try:
# Расчет коэффициентов тренда по числовым значениям предиктора
if num_mask.sum() > 0:
try:
poly_coefs = np.polyfit(
x.loc[num_mask].astype(float), y.loc[num_mask], deg=1
)
except np.linalg.LinAlgError as e:
print(f"Error in np.polyfit on predictor: '{col}'.\nError MSG: {e}")
print("Linear Least Squares coefficients were set to [1, 0]")
poly_coefs = np.array([1, 0])
self.trend_coefs.update({col: poly_coefs})
# Расчет монотонных границ
gr_subset_num = gr_subset[gr_subset["value"].isin(num_vals)].copy()
gr_subset_num["value"] = pd.to_numeric(gr_subset_num["value"])
gr_subset_num = gr_subset_num.sort_values("value")
borders = self._monotonic_borders(gr_subset_num, self.trend_coefs[col])
self.borders.update({col: borders})
# Применение границ к сгруппированным данным
gr_subset_num["groups"] = pd.cut(gr_subset_num["value"], borders)
gr_subset_num["type"] = "num"
except ValueError as e:
print(f"ValueError on predictor {col}.\nError MSG: {e}")
# Расчет коэффициентов тренда по категориальным значениям предиктора
if (~num_mask).sum() > 0:
gr_subset_cat = gr_subset[~gr_subset["value"].isin(num_vals)].copy()
gr_subset_cat["groups"] = gr_subset_cat["value"].fillna("пусто")
gr_subset_cat["type"] = "cat"
# Объединение числовых и категориальных значений
gr_subset = pd.concat([gr_subset_num, gr_subset_cat], axis=0, ignore_index=True)
# Расчет WOE и IV
alpha = self.alpha_values.get(col, 0)
res_i = self._statistic(gr_subset, alpha=alpha)
is_empty_exists = any(res_i["groups"].astype(str).str.contains("пусто"))
if is_empty_exists:
res_i["groups"].replace({"пусто": np.nan}, inplace=True)
return res_i
def _transform_single(self, x, stats=None):
"""
Применение группировки и WoE-преобразования
Parameters
---------------
x : pandas.Series
Значения предиктора
Returns
---------------
X_woe : pandas.DataFrame
WoE-преобразования значений предиктора
WoE = 0, если группа не встречалась в обучающей выборке
"""
orig_index = x.index
X_woe = x.copy()
if stats is None:
stats = self.stats.get_predictor(X_woe.name)
# Маппинги для замены групп на соответствующие значения WOE
num_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "num"
}
cat_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "cat"
}
# Категориальные группы
cat_bounds = stats.loc[stats["type"] == "cat", "groups"]
# predict по числовым значениям
DF_num = stats.loc[stats["type"] == "num"]
if DF_num.shape[0] > 0:
# Границы (правые) интервалов для разбивки числовых переменных
num_bounds = [-np.inf] + list(
pd.IntervalIndex(stats.loc[stats["type"] == "num", "groups"]).right
)
# Выделение только числовых значений предиктора
# (похожих на числа и тех, что явно не указаны как категориальные)
X_woe_num = pd.to_numeric(
X_woe[(self._get_nums_mask(X_woe)) & (~X_woe.isin(cat_bounds))]
)
# Разбивка значений на интервалы в соответствии с группировкой
X_woe_num = pd.cut(X_woe_num, num_bounds)
# Замена групп на значения WOE
X_woe_num = X_woe_num.replace(num_map)
X_woe_num.name = "woe"
else:
X_woe_num = pd.Series()
# predict по категориальным значениям (может обновлять значения по числовым)
DF_cat = stats.loc[stats["type"] == "cat"]
if DF_cat.shape[0] > 0:
# Выделение строковых значений и тех, что явно выделены как категориальные
X_woe_cat = X_woe[X_woe.isin(cat_map.keys())]
# Замена групп на значения WOE
X_woe_cat = X_woe_cat.replace(cat_map)
else:
X_woe_cat = pd.Series()
# predict по новым категориям (нечисловые: которых не было при групприровке)
# Сбор индексов категориальных и числовых значений
used_index = np.hstack([X_woe_cat.index, X_woe_num.index])
if len(used_index) < len(x):
X_woe_oth = X_woe.index.drop(used_index)
X_woe_oth = pd.Series(0, index=X_woe_oth)
else:
X_woe_oth = pd.Series()
X_woe = pd.concat([X_woe_num, X_woe_cat, X_woe_oth]).reindex(orig_index)
X_woe = pd.to_numeric(X_woe, downcast="signed")
return X_woe
def _monotonic_borders(self, grouped, p):
"""
Определение оптимальных границ групп предиктора (монотонный тренд)
Parameters
---------------
DF_grouping : pandas.DataFrame
Агрегированные данные по значениям предиктора (результат работы
            функции grouping, очищенный от категориальных значений).
            Должен содержать поля 'predictor', 'sample_count', 'target_count',
            'sample_rate' и 'target_rate'
p : list-like, длиной в 2 элемента
Коэффициенты линейного тренда значений предиктора
Returns
---------------
R_borders : list
Правые границы групп для последующей группировки
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
R_borders = []
min_ind = 0 # минимальный индекс. Начальные условия
DF_grouping = grouped.copy().sort_values("value").reset_index()
while min_ind < DF_grouping.shape[0]: # цикл по новым группам
# Расчет показателей накопительным итогом
DF_j = DF_grouping.iloc[min_ind:]
DF_iter = DF_j[["sample_rate", "sample_count", "target_count"]].cumsum()
DF_iter["non_target_count"] = (
DF_iter["sample_count"] - DF_iter["target_count"]
)
DF_iter["target_rate"] = DF_iter["target_count"] / DF_iter["sample_count"]
# Проверка на соответствие критериям групп
DF_iter["check"] = self._check_groups(DF_iter)
# Расчет базы для проверки оптимальности границы
# В зависимости от тренда считается скользящий _вперед_ минимум или максимум
# (в расчете участвуют все наблюдения от текущего до последнего)
if k11 == 1:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.min()[::-1]
)
else:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.max()[::-1]
)
# Проверка оптимальности границы
DF_iter["opt"] = DF_iter["target_rate"] == DF_iter["pd_gr"]
DF_iter = pd.concat([DF_j[["value"]], DF_iter], axis=1)
try:
min_ind = DF_iter.loc[
(DF_iter["check"]) & (DF_iter["opt"]), "target_rate"
].index.values[0]
score_j = DF_iter.loc[min_ind, "value"]
if (
len(R_borders) > 0 and score_j == R_borders[-1]
): # Выход из цикла, если нет оптимальных границ
break
except Exception:
break
min_ind += 1
R_borders.append(score_j)
# Проверка последней добавленной группы
if len(R_borders) > 0:
DF_iter = DF_grouping.loc[DF_grouping["value"] > R_borders[-1]]
sample_rate_i = DF_iter["sample_rate"].sum() # доля выборки
sample_count_i = DF_iter["sample_count"].sum() # количество наблюдений
target_count_i = DF_iter["target_count"].sum() # количество целевых
non_target_count_i = sample_count_i - target_count_i # количество нецелевых
if (
(sample_rate_i < self.min_sample_rate)
or (target_count_i < self.min_count)
or (non_target_count_i < self.min_count)
):
R_borders.remove(R_borders[-1]) # удаление последней границы
else:
predictor = DF_grouping["predictor"].iloc[0]
warnings.warn(
f"Couldn't find any borders for feature {predictor}.\n Borders set on (-inf, +inf)"
)
R_borders = [-np.inf] + R_borders + [np.inf]
return R_borders
def _check_groups(
self,
df,
sample_rate_col="sample_rate",
sample_count_col="sample_count",
target_count_col="target_count",
    ):
        """ Проверить сгруппированные значения предиктора на соответствие условиям"""
cond_mask = (
(df[sample_rate_col] >= self.min_sample_rate - 10 ** -9)
& (df[sample_count_col] >= self.min_count)
& (df[target_count_col] >= self.min_count)
)
return cond_mask
def _get_cat_values_for_join(self, grouped):
"""Получить словарь для замены категорий на объединяемые
NOTE: Нужно тестирование
TODO: переписать на рекурсию
"""
df = grouped.copy()
cond_mask = ~self._check_groups(df)
res = df[
[
"predictor",
"value",
"sample_count",
"target_count",
"sample_rate",
"target_rate",
]
].copy()
res = res.sort_values(["sample_rate", "target_rate"])
res["cum_sample_rate"] = res["sample_rate"].cumsum()
res["check"] = cond_mask
res["check_reverse"] = ~cond_mask
res["check_diff"] = res["check"].astype(int).diff()
res["new_group"] = (res["check_diff"] == -1).astype(int)
res["exist_group"] = res["check_reverse"].astype(int).eq(1)
res.loc[~res["check_reverse"], "exist_group"] = np.NaN
res["exist_group_cum"] = res["exist_group"].cumsum().fillna(method="bfill")
res[["cum_sr", "cum_sc", "cum_tc"]] = res.groupby("exist_group_cum").agg(
{
"sample_rate": "cumsum",
"sample_count": "cumsum",
"target_count": "cumsum",
}
)
res["cum_sr_check"] = (
self._check_groups(res, "cum_sr", "cum_sc", "cum_tc")
.astype(int)
.diff()
.eq(1)
.astype(int)
.shift()
)
display(res)
res.loc[res["cum_sr_check"] != 1, "cum_sr_check"] = np.nan
res["cum_sr_check"] = res["cum_sr_check"].fillna(method="ffill").fillna(0)
res["group_number"] = res["exist_group_cum"] + res["cum_sr_check"]
repl = res.groupby("group_number").agg({"value": list}).to_dict()["value"]
repl = {k: "_".join(v) for k, v in repl.items()}
res["group_vals"] = res["group_number"].replace(repl)
t = dict(zip(res["value"], res["group_vals"]))
return t
def _plot_single_woe_grouping(self, stats, ax_pd=None):
"""
Построение графика по группировке предиктора
Parameters
---------------
stats : pandas.DataFrame
Статистика по каждой группе (результат работы функции statistic):
минимальное, максимальное значение, доля от общего объема выборки,
количество и доля целевых и нецелевых событий в каждой группе,
WOE и IV каждой группы
Должен содержать столбцы: 'sample_rate', 'target_rate', 'WOE'
ax_pd : matplotlib.Axes
Набор осей (subplot)
"""
# Расчеты
x2 = [stats["sample_rate"][:i].sum() for i in range(stats.shape[0])] + [
1
] # доля выборки с накоплением
x = [
np.mean(x2[i : i + 2]) for i in range(len(x2) - 1)
] # средняя точка в группах
# Выделение нужной информации для компактности
woe = list(stats["WOE"])
height = list(stats["target_rate"]) # проблемность в группе
width = list(stats["sample_rate"]) # доля выборки на группу
# Визуализация
if ax_pd is None:
_, ax_pd = plt.subplots(figsize=(8, 5))
# Столбчатая диаграмма доли целевых в группах
ax_pd.bar(
x=x,
height=height,
width=width,
color=[0, 122 / 255, 123 / 255],
label="Группировка",
alpha=0.7,
)
# График значений WOE по группам
ax_woe = ax_pd.twinx() # дубликат осей координат
ax_woe.plot(
x, woe, lw=2, color=[37 / 255, 40 / 255, 43 / 255], label="woe", marker="o"
)
# Линия нулевого значения WOE
ax_woe.plot(
[0, 1], [0, 0], lw=1, color=[37 / 255, 40 / 255, 43 / 255], linestyle="--"
)
# Настройка осей координат
plt.xlim([0, 1])
plt.xticks(x2, [round(i, 2) for i in x2], fontsize=12)
ax_pd.grid(True)
ax_pd.set_xlabel("Доля выборки", fontsize=16)
ax_pd.set_ylabel("pd", fontsize=16)
ax_woe.set_ylabel("woe", fontsize=16)
# Расчет границ графика и шага сетки
max_woe = max([int(abs(i)) + 1 for i in woe])
max_pd = max([int(i * 10) + 1 for i in height]) / 10
# Границы и сетка для столбчатой диаграммы
ax_pd.set_ylim([0, max_pd])
ax_pd.set_yticks([round(i, 2) for i in np.linspace(0, max_pd, 11)])
ax_pd.legend(bbox_to_anchor=(1.05, 0.83), loc=[0.2, -0.25], fontsize=14)
# Границы и сетка для графика WOE
ax_woe.set_ylim([-max_woe, max_woe])
ax_woe.set_yticks([round(i, 2) for i in np.linspace(-max_woe, max_woe, 11)])
ax_woe.legend(bbox_to_anchor=(1.05, 0.92), loc=[0.6, -0.25], fontsize=14)
plt.title(
"Группировка предиктора {}".format(stats.loc[0, "predictor"]), fontsize=18
)
# Для категориальных
n_cat = stats.loc[stats["type"] == "cat"].shape[0]
if n_cat > 0:
ax_pd.bar(
x=x[-n_cat:],
height=height[-n_cat:],
width=width[-n_cat:],
color="m",
label="Категориальные",
)
ax_pd.legend(bbox_to_anchor=(1.05, 0.76), loc=[0.15, -0.33], fontsize=14)
plt.tight_layout()
def _get_possible_groups(self):
"""
Поиск возможных групп в значениях предикторов после агрегации
"""
self.possible_groups = pd.DataFrame()
# Выделение значений предиктора с достаточным кол-вом наблюдений и
# не отмеченных, как категориальные
for i in self.predictors:
cat_vals = self.cat_values.get(i, [])
DF_i1 = self.grouped.get_predictor(i).copy()
DF_i1 = DF_i1.loc[
(DF_i1["sample_rate"] > self.min_sample_rate)
& (~DF_i1["value"].isin(cat_vals))
]
# Выделение всех значений предиктора, не отмеченных, как категориальные
DF_i2 = self.grouped.get_predictor(i).copy()
DF_i2 = DF_i2.loc[(~DF_i2["value"].isin(cat_vals))]
            # Выбор значений: которые не равны бесконечности и при этом не являются числами
L = ~(DF_i2["value"] == np.inf) & (~(self._get_nums_mask(DF_i2["value"])))
DF_i2 = DF_i2.loc[L]
# Объединение найденных значений в одну таблицу
DF_i = pd.concat((DF_i1, DF_i2), ignore_index=True).drop_duplicates()
self.possible_groups = self.possible_groups.append(DF_i)
def _get_bad_groups(self):
"""
Поиск групп: не удовлетворяющих условиям
"""
self.bad_groups = self.stats.loc[
(self.stats["sample_rate"] < self.min_sample_rate)
| (self.stats["target_count"] < self.min_count)
| (self.stats["sample_count"] - self.stats["target_count"] < self.min_count)
]
def _regularize_groups(self, stats, alpha=0):
"""расчет оптимальной целевой для группы на основе готовой woe-группировки
формула и детали в видео
https://www.youtube.com/watch?v=g335THJxkto&list=PLLIunAIxCvT8ZYpC6-X7H0QfAQO9H0f-8&index=12&t=0s
pd = (y_local * K + Y_global * alpha) / (K + alpha)"""
Y_global = stats["target_count"].sum() / stats["sample_count"].sum()
K = stats["sample_count"] / stats["sample_count"].sum()
stats["target_rate"] = (stats["target_rate"] * K + Y_global * alpha) / (
K + alpha
)
stats["target_count"] = np.floor(
stats["sample_count"] * stats["target_rate"]
).astype(int)
return stats
def _statistic(self, grouped, alpha=0):
"""
Расчет статистики по группам предиктора: минимальное, максимальное значение, доля от
общего объема выборки, количество и доля целевых и нецелевых событий в каждой группе
А также расчет WOE и IV каждой группы
Parameters
---------------
grouped : pandas.DataFrame
Данные полученных групп предиктора. Кол-во строк совпадает с кол-вом
уникальных значений предиктора.
Должен содержать столбцы: 'sample_count', 'target_count', 'groups'
alpha : float, default 0
Коэффициент регуляризации групп
Returns
---------------
stats : pandas.DataFrame
Агрегированные данные по каждой группе
"""
nothing = 10 ** -6
stats = grouped.groupby(["predictor", "groups"], sort=False).agg(
{
"type": "first",
"sample_count": "sum",
"target_count": "sum",
"value": ["min", "max"],
},
)
stats.columns = ["type", "sample_count", "target_count", "min", "max"]
stats.reset_index(inplace=True)
stats["sample_rate"] = stats["sample_count"] / stats["sample_count"].sum()
stats["target_rate"] = stats["target_count"] / stats["sample_count"]
stats = self._regularize_groups(stats, alpha=alpha)
# Расчет WoE и IV
samples_num = stats["sample_count"].sum()
events = stats["target_count"].sum()
non_events = samples_num - events
stats["non_events_i"] = stats["sample_count"] - stats["target_count"]
stats["event_rate_i"] = stats["target_count"] / (events + nothing)
stats["non_event_rate_i"] = stats["non_events_i"] / (non_events + nothing)
stats["WOE"] = np.log(
stats["non_event_rate_i"] / (stats["event_rate_i"] + nothing) + nothing
)
stats["IV"] = stats["WOE"] * (stats["non_event_rate_i"] - stats["event_rate_i"])
return stats
def _calc_trend_coefs(self, x, y):
"""
Расчет коэффициентов тренда
Parameters
---------------
x : pandas.Series
Значения предиктора
y : pandas.Series
Целевая переменная
Returns
-----------
dict[str, tuple[float, float]]
"""
return {x.name: np.polyfit(x, y, deg=1)}
# Служебные функции
def _reset_state(self):
self.trend_coefs = {}
self.borders = {}
self.cat_values = {}
self.predictors = []
self.grouped = _GroupedPredictor()
self.stats = _GroupedPredictor()
def _get_nums_mask(self, x):
# if x.apply(lambda x: isinstance(x, str)).sum() == len(x):
# return pd.Series(False, index=x.index)
# else:
# mask = pd.to_numeric(x, errors="coerce").notna()
mask = pd.to_numeric(x, errors="coerce").notna()
return mask
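# Illustrative usage sketch for WoeTransformer on synthetic data. The column
# names and generated values below are assumptions made for demonstration only;
# they are not part of the original pipeline.
def _demo_woe_transformer():
    """Fit the transformer on toy data, transform it and inspect IV values."""
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame({
        "age": rng.randint(18, 70, size=500),
        "region": rng.choice(["north", "south", "west"], size=500),
    })
    y_demo = pd.Series(rng.binomial(1, 0.2, size=500), name="target")
    woe = WoeTransformer(min_sample_rate=0.05, min_count=3)
    # string values of "region" are treated as categories automatically
    woe.fit(X_demo, y_demo)
    X_woe = woe.transform(X_demo)
    return X_woe, woe.get_iv(sort=True)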
class WoeTransformerRegularized(WoeTransformer):
"""
Класс для построения и применения WOE группировки к датасету с применением
регуляризации малых групп
"""
def __init__(self, min_sample_rate=0.05, min_count=3, alphas=None, n_seeds=30):
"""
Инициализация экземпляра класса
"""
        self.min_sample_rate = min_sample_rate
        self.min_count = min_count
        self.predictors = []
        self.alphas = 100 if alphas is None else alphas
        self.alpha_values = {}
        self.n_seeds = n_seeds
        # Атрибуты, ожидаемые методами базового класса (_grouping использует join_bad_categories)
        self.save_data = False
        self.join_bad_categories = False
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
Обучение трансформера и расчет всех промежуточных данных
Parameters
---------------
X : pd.DataFrame
Датафрейм с предикторами, которые нужно сгруппировать
y : pd.Series
Целевая переменная
cat_values : dict[str, list[str]], optional
Словарь списков с особыми значениями, которые нужно
выделить в категории
По умолчанию все строковые и пропущенные значения
выделяются в отдельные категории
alpha_values : dict[str, float], optional
Словарь со значениями alpha для регуляризации WOE-групп
Returns
-------
self : WoeTransformer
"""
# Сброс текущего состояния трансформера
self._reset_state()
self.cat_values = cat_values
self.regularization_stats = _GroupedPredictor()
for col in tqdm(X.columns, desc="Searching alphas"):
temp_alpha = self._cat_features_alpha_logloss(
X[col], y, self.alphas, self.n_seeds
)
self.alpha_values.update({col: temp_alpha})
self._grouping(X, y)
# Расчет WOE и IV
self._fit_numeric(X, y)
# Поиск потенциальных групп
# Поиск "плохих" групп
self._get_bad_groups()
return self
def _cat_features_alpha_logloss(self, x, y, alphas, seed=30):
"""
функция расчета IV, GINI и logloss для категориальных
переменных с корректировкой целевой по alpha
"""
# задаем промежуточную функцию для WOE преобразования переменной из исходного датафрейма
# по рассчитанным WOE из IVWOE
def calc_woe_i(row_value, stats):
return stats.loc[stats["groups"] == row_value, "WOE"].values[0]
predictor = x.name
target = y.name
df = pd.DataFrame({predictor: x.values, target: y.values})
df[predictor] = df[predictor].fillna("NO_INFO")
L_logloss_mean = []
GINI_IV_mean = []
for alpha_i in alphas:
logloss_i = []
GINI_i = []
IV_i = []
for seed_i in range(seed):
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=seed_i, stratify=y
)
# Группировка значений предиктора с текущим alpha
df_i = self._group_single(X_train, y_train)
df_i["groups"] = df_i["value"].fillna("пусто")
df_i["type"] = "cat"
# Обучение и применение группировки к обучающему набору
WOE_i = self._fit_single(X_train, y_train, df_i)
WOE_i = self._regularize_groups(WOE_i, alpha_i)
# расчет оптимальной целевой для группы, формула и детали в видео
# https://www.youtube.com/watch?v=g335THJxkto&list=PLLIunAIxCvT8ZYpC6-X7H0QfAQO9H0f-8&index=12&t=0s
# pd = (y_local * K + Y_global * alpha) / (K + alpha)
Y_global = y_train.mean()
K = WOE_i["sample_count"] / WOE_i["sample_count"].sum()
WOE_i["target_rate"] = (
WOE_i["target_rate"] * K + Y_global * alpha_i
) / (K + alpha_i)
WOE_i["target_count"] = np.floor(
WOE_i["sample_count"] * WOE_i["target_rate"]
).astype(int)
X_test_WOE = self._transform_single(X_test, WOE_i)
roc_auc_i = sk.metrics.roc_auc_score(y_test, X_test_WOE)
# Подстановка регуляризованной доли целевой вместо каждой группы
target_transformed = X_test_WOE.replace(
dict(zip(WOE_i["WOE"], WOE_i["target_rate"]))
)
# Запись значений
logloss_i.append(
sk.metrics.log_loss(y_test, target_transformed.fillna(0))
)
IV_i.append(WOE_i["IV"].sum())
GINI_i.append(abs(2 * roc_auc_i - 1))
# Запись средних значений
L_logloss_mean.append([alpha_i, np.mean(logloss_i)])
GINI_IV_mean.append([alpha_i, np.mean(GINI_i), np.mean(IV_i)])
alpha_GINI_IV = pd.DataFrame(GINI_IV_mean, columns=["alpha", "GINI", "IV"])
alpha_GINI_IV.insert(0, "predictor", predictor)
self.regularization_stats = self.regularization_stats.append(alpha_GINI_IV)
# Индекс значения alpha с наименьшим логлоссом
min_logloss_ind = np.argmin(L_logloss_mean, axis=0)[1]
alpha_opt = L_logloss_mean[min_logloss_ind][0]
return alpha_opt
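# A small numeric illustration of the regularization formula used above:
# pd = (y_local * K + Y_global * alpha) / (K + alpha), where K is the group's
# share of observations. The default arguments are made-up toy values.
def _demo_alpha_regularization(y_local=0.9, K=0.02, Y_global=0.2, alpha=0.1):
    """With a small K the regularized rate is pulled strongly towards Y_global."""
    return (y_local * K + Y_global * alpha) / (K + alpha)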
########################
# Комплект ускоренных версий функции #
########################
# Сильно отстал от класса, но в точности повторяет функциональность Vanilla
def grouping(DF_data_i, low_acc=None):
"""
Агрегация данных по значениям предиктора. Рассчитывает количество наблюдений,
количество целевых событий, долю группы от общего числа наблюдений и долю целевых в группе
Parameters
---------------
DF_data_i : pandas.DataFrame
Таблица данных для агрегации, должна содержать поля 'predictor' и 'target'.
Поле target при этом должно состоять из 0 и 1, где 1 - целевое событие
low_acc : int, default None
Параметр для округления значений предиктора.
Если None, то предиктор не округляется.
Если целое неотрицательное число, параметр используется для определения
количества знаков после запятой, остальные значения игнорируются
Returns
---------------
DF_grouping : pandas.DataFrame
Таблица с агрегированными данными по значениям предиктора
"""
    # Округление, если аргумент принимает допустимые значения
if low_acc and type(low_acc) is int and low_acc > 0:
DF_data_i = DF_data_i[["predictor", "target"]].round(low_acc)
# Группировка и расчет показателей
DF_grouping = (
DF_data_i.groupby("predictor")["target"].agg(["count", "sum"]).reset_index()
)
DF_grouping.columns = ["predictor", "sample_count", "target_count"]
DF_grouping["sample_rate"] = (
DF_grouping["sample_count"] / DF_grouping["sample_count"].sum()
)
DF_grouping["target_rate"] = (
DF_grouping["target_count"] / DF_grouping["sample_count"]
)
return DF_grouping
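# Example sketch: aggregating a tiny synthetic predictor/target frame with
# `grouping`. The numbers are made up and only illustrate the expected input.
def _demo_grouping():
    df_demo = pd.DataFrame({
        "predictor": [1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
        "target": [0, 1, 0, 0, 1, 0, 0, 0, 1, 1],
    })
    # returns columns: predictor, sample_count, target_count, sample_rate, target_rate
    return grouping(df_demo)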
def monotonic_borders(DF_grouping, p, min_sample_rate=0.05, min_count=3):
"""
Определение оптимальных границ групп предиктора (монотонный тренд)
Parameters
---------------
DF_grouping : pandas.DataFrame
Агрегированные данные по значениям предиктора (результат работы
        функции grouping, очищенный от категориальных значений).
        Должен содержать поля 'predictor', 'sample_count', 'target_count',
        'sample_rate' и 'target_rate'
p : list-like, длиной в 2 элемента
Коэффициенты линейного тренда значений предиктора
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
Returns
---------------
R_borders : list
Правые границы групп для последующей группировки
"""
k01, k11 = (1, 1) if p[0] > 0 else (0, -1)
R_borders = []
min_ind = 0 # минимальный индекс. Начальные условия
while min_ind < DF_grouping.shape[0]: # цикл по новым группам
# Расчет показателей накопительным итогом
DF_j = DF_grouping.loc[min_ind:]
DF_iter = DF_j[["sample_rate", "sample_count", "target_count"]].cumsum()
DF_iter["non_target_count"] = DF_iter["sample_count"] - DF_iter["target_count"]
DF_iter["target_rate"] = DF_iter["target_count"] / DF_iter["sample_count"]
# Проверка на соответствие критериям групп
DF_iter["check"] = (
(DF_iter["sample_rate"] >= min_sample_rate - 10 ** -9)
& (DF_iter["target_count"] >= min_count)
& (DF_iter["non_target_count"] >= min_count)
)
# Расчет базы для проверки оптимальности границы
# В зависимости от тренда считается скользящий _вперед_ минимум или максимум
# (в расчете участвуют все наблюдения от текущего до последнего)
if k11 == 1:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.min()[::-1]
)
else:
DF_iter["pd_gr"] = (
DF_iter["target_rate"][::-1]
.rolling(len(DF_iter), min_periods=0)
.max()[::-1]
)
# Проверка оптимальности границы
DF_iter["opt"] = DF_iter["target_rate"] == DF_iter["pd_gr"]
DF_iter = pd.concat([DF_j[["predictor"]], DF_iter], axis=1)
try:
min_ind = DF_iter.loc[
(DF_iter["check"]) & (DF_iter["opt"]), "target_rate"
].index.values[0]
score_j = DF_iter.loc[min_ind, "predictor"]
if (
len(R_borders) > 0 and score_j == R_borders[-1]
): # Выход из цикла, если нет оптимальных границ
break
except Exception:
break
min_ind += 1
R_borders.append(score_j)
# Проверка последней добавленной группы
DF_iter = DF_grouping.loc[DF_grouping["predictor"] > R_borders[-1]]
sample_rate_i = DF_iter["sample_rate"].sum() # доля выборки
sample_count_i = DF_iter["sample_count"].sum() # количество наблюдений
target_count_i = DF_iter["target_count"].sum() # количество целевых
non_target_count_i = sample_count_i - target_count_i # количество нецелевых
if (
(sample_rate_i < min_sample_rate)
or (target_count_i < min_count)
or (non_target_count_i < min_count)
):
R_borders.remove(R_borders[-1]) # удаление последней границы
return R_borders
# Статистика
def statistic(DF_groups):
"""
Расчет статистики по группам предиктора: минимальное, максимальное значение, доля от
общего объема выборки, количество и доля целевых и нецелевых событий в каждой группе
А также расчет WOE и IV каждой группы
Parameters
---------------
DF_groups : pandas.DataFrame
Данные полученных групп предиктора. Кол-во строк совпадает с кол-вом
уникальных значений предиктора.
Должен содержать столбцы: 'sample_count', 'target_count', 'groups'
Returns
---------------
DF_statistic : pandas.DataFrame
Агрегированные данные по каждой группе
"""
nothing = 10 ** -6
DF_statistic = (
DF_groups[["sample_count", "target_count", "groups"]]
.groupby("groups", as_index=False, sort=False)
.sum()
)
DF_statistic_min = (
DF_groups[["predictor", "groups"]]
.groupby("groups", as_index=False, sort=False)
.min()
)
DF_statistic_max = (
DF_groups[["predictor", "groups"]]
.groupby("groups", as_index=False, sort=False)
.max()
)
DF_statistic["min"] = DF_statistic_min["predictor"]
DF_statistic["max"] = DF_statistic_max["predictor"]
DF_statistic["sample_rate"] = (
DF_statistic["sample_count"] / DF_statistic["sample_count"].sum()
)
DF_statistic["target_rate"] = (
DF_statistic["target_count"] / DF_statistic["sample_count"]
)
# Расчет WoE и IV
samples_num = DF_statistic["sample_count"].sum()
events = DF_statistic["target_count"].sum()
non_events = samples_num - events
DF_statistic["non_events_i"] = (
DF_statistic["sample_count"] - DF_statistic["target_count"]
)
DF_statistic["event_rate_i"] = DF_statistic["target_count"] / (events + nothing)
DF_statistic["non_event_rate_i"] = DF_statistic["non_events_i"] / (
non_events + nothing
)
DF_statistic["WOE"] = np.log(
DF_statistic["non_event_rate_i"] / (DF_statistic["event_rate_i"] + nothing)
+ nothing
)
DF_statistic["IV"] = DF_statistic["WOE"] * (
DF_statistic["non_event_rate_i"] - DF_statistic["event_rate_i"]
)
DF_statistic = DF_statistic.merge(
DF_groups[["type", "groups"]].drop_duplicates(), how="left", on="groups"
)
return DF_statistic
# Графики
def group_plot(DF_result):
"""
Построение графика по группировке предиктора
Parameters
---------------
DF_result : pandas.DataFrame
Статистика по каждой группе (результат работы функции statistic):
минимальное, максимальное значение, доля от общего объема выборки,
количество и доля целевых и нецелевых событий в каждой группе,
WOE и IV каждой группы
Должен содержать столбцы: 'sample_rate', 'target_rate', 'WOE'
Returns
---------------
None
Не возвращает ничего
"""
# Расчеты
sample_rate, target_rate, WOE = ["sample_rate", "target_rate", "WOE"]
x2 = [DF_result[sample_rate][:i].sum() for i in range(DF_result.shape[0])] + [
1
] # доля выборки с накоплением
x = [np.mean(x2[i : i + 2]) for i in range(len(x2) - 1)] # средняя точка в группах
# Выделение нужной информации для компактности
woe = list(DF_result[WOE])
height = list(DF_result[target_rate]) # проблемность в группе
width = list(DF_result[sample_rate]) # доля выборки на группу
# Визуализация
fig, ax_pd = plt.subplots(figsize=(8, 5))
# Столбчатая диаграмма доли целевых в группах
ax_pd.bar(
x=x,
height=height,
width=width,
color=[0, 122 / 255, 123 / 255],
label="Группировка",
alpha=0.7,
)
# График значений WOE по группам
ax_woe = ax_pd.twinx() # дубликат осей координат
ax_woe.plot(
x, woe, lw=2, color=[37 / 255, 40 / 255, 43 / 255], label="woe", marker="o"
)
# Линия нулевого значения WOE
ax_woe.plot(
[0, 1], [0, 0], lw=1, color=[37 / 255, 40 / 255, 43 / 255], linestyle="--"
)
# Настройка осей координат
plt.xlim([0, 1])
plt.xticks(x2, [round(i, 2) for i in x2], fontsize=12)
ax_pd.grid(True)
ax_pd.set_xlabel("Доля выборки", fontsize=16)
ax_pd.set_ylabel("pd", fontsize=16)
ax_woe.set_ylabel("woe", fontsize=16)
# Расчет границ графика и шага сетки
max_woe = max([int(abs(i)) + 1 for i in woe])
max_pd = max([int(i * 10) + 1 for i in height]) / 10
# Границы и сетка для столбчатой диаграммы
ax_pd.set_ylim([0, max_pd])
ax_pd.set_yticks([round(i, 2) for i in np.linspace(0, max_pd, 11)])
ax_pd.legend(loc=[0.2, -0.25], fontsize=14)
# Границы и сетка для графика WOE
ax_woe.set_ylim([-max_woe, max_woe])
ax_woe.set_yticks([round(i, 2) for i in np.linspace(-max_woe, max_woe, 11)])
ax_woe.legend(loc=[0.6, -0.25], fontsize=14)
plt.title("Группировка предиктора", fontsize=18)
# Для категориальных
n_cat = DF_result.loc[DF_result["type"] == "cat"].shape[0]
if n_cat > 0:
ax_pd.bar(
x=x[-n_cat:],
height=height[-n_cat:],
width=width[-n_cat:],
color="m",
label="Категориальные",
)
ax_pd.legend(loc=[0.15, -0.33], fontsize=14)
plt.show()
# ## Трансформер
def woe_transformer(
x,
y,
cat_values=[],
min_sample_rate=0.05,
min_count=3,
errors="skip",
low_accuracy=None,
plot=True,
verbose=True,
):
"""
Группировка значений предиктора, определение оптимальных границ и расчет WOE и IV
Parameters
---------------
x : pandas.Series
Mассив числовых значений предиктора. Не должен содержать пропущенных
значений, но может сочетать строковые и числовые
y : pandas.Series
Mассив меток класса (0, 1)
cat_values: list
Категориальные значения (пустышки и несравнимые значения).
Элементы списка должны быть строками
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
    errors : str, default 'skip'
        Способ обработки ошибок:
        'skip' - не возвращать ничего в случае ошибки
        'raise' - бросить исключение
low_accuracy : int, default None
Режим пониженной точности (округление при группировке)
Если None, то предиктор не округляется.
Если целое неотрицательное число, параметр используется для определения
количества знаков после запятой, остальные значения игнорируются
plot : bool, default True
Включение/выключение визуализации группировки
verbose : bool, default True
        Включение/выключение доп. информации по группировке
Returns
---------------
DF_result : pandas.DataFrame
Таблица с итоговой группировкой и статистикой
"""
if errors not in ["skip", "raise"]:
warnings.warn(
f"Attribute `errors` must be one of ['skip', 'raise']. Passed {errors}.\n\
Defaulting to 'skip'"
)
errors = "skip"
# Обработка входных данных
DF_data_i = pd.DataFrame({"predictor": x, "target": y})
# Агрегация данных по значениям предиктора
DF_data_gr = grouping(DF_data_i, low_accuracy)
# Проверка категориальных групп (возможные дополнительные категории)
if verbose:
# Выделение значений предиктора с достаточным кол-вом наблюдений и
# не отмеченных, как категориальные
DF_i1 = DF_data_gr.loc[DF_data_gr["sample_rate"] > min_sample_rate].loc[
~DF_data_gr["predictor"].isin(cat_values)
]
# Выделение всех значений предиктора, не отмеченных, как категориальные
DF_i2 = DF_data_gr.loc[~DF_data_gr["predictor"].isin(cat_values)]
        # Выбор значений: которые не равны бесконечности и при этом не являются числами
L = ~(DF_i2["predictor"] == np.inf) & (
pd.to_numeric(DF_i2["predictor"], errors="coerce").isna()
)
DF_i2 = DF_i2.loc[L]
# Объединение найденных значений в одну таблицу
DF_i = DF_i1.append(DF_i2, ignore_index=True).drop_duplicates()
if DF_i.shape[0] > 0:
print("Возможно эти значения предиктора тоже являются категориальными:")
display(DF_i)
# Выделение числовых значений предиктора
DF_data_gr_num = DF_data_gr.loc[
~DF_data_gr["predictor"].isin(cat_values)
].reset_index(drop=True)
if DF_data_gr_num.shape[0] > 0:
try:
DF_data_gr_num["predictor"] = DF_data_gr_num["predictor"].astype("float")
# Определение тренда по числовым значениям
DF_i = DF_data_i.loc[~DF_data_i["predictor"].isin(cat_values)]
p = np.polyfit(DF_i["predictor"].astype("float"), DF_i["target"], deg=1)
# Определение оптимальных границ групп
R_borders = monotonic_borders(DF_data_gr_num, p, min_sample_rate, min_count)
except Exception:
if errors == "raise":
raise ValueError("Ошибка при расчете монотонных границ")
else:
print("Ошибка при расчете монотонных границ")
try:
# Применение границ
DF_data_gr_num["groups"] = pd.cut(
DF_data_gr_num["predictor"], [-np.inf] + R_borders + [np.inf]
)
DF_data_gr_num["type"] = "num"
except Exception:
if errors == "raise":
raise ValueError("Ошибка при применении монотонных границ")
else:
print("Ошибка при применении монотонных границ")
# Добавление данных по категориальным значениям
DF_data_gr_2k = DF_data_gr.loc[
DF_data_gr["predictor"].isin(cat_values)
].reset_index(drop=True)
DF_data_gr_2k["groups"] = DF_data_gr_2k["predictor"].copy()
DF_data_gr_2k["type"] = "cat"
try:
# Расчет статистики, WoE и IV по группам числовых значений
if DF_data_gr_num.shape[0] > 0:
DF_result = statistic(
DF_data_gr_num.append(DF_data_gr_2k, ignore_index=True)
)
else:
DF_result = statistic(DF_data_gr_2k)
except Exception:
print("Ошибка при расчете статистики")
# Проверка категориальных групп (категории, которые не удовлетворяют заданным ограничениям)
if verbose:
DF_j = DF_result.loc[
(DF_result["sample_rate"] < min_sample_rate)
| (DF_result["target_count"] < min_count)
| (DF_result["sample_count"] - DF_result["target_count"] < min_count)
]
if DF_j.shape[0] > 0:
print("Эти группы не удовлетворяют заданным ограничениям:")
display(DF_j)
# Построение графика
if plot:
group_plot(DF_result)
return DF_result
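# Example sketch: an end-to-end call of `woe_transformer` on synthetic data.
# The predictor/target values are generated only for illustration; plot and
# verbose are switched off so the example runs headless.
def _demo_woe_transformer_func():
    rng = np.random.RandomState(42)
    x_demo = pd.Series(rng.randint(0, 100, size=300), name="predictor")
    y_demo = pd.Series(
        ((x_demo > 60).astype(int) | rng.binomial(1, 0.1, size=300)), name="target"
    )
    return woe_transformer(x_demo, y_demo, cat_values=[], plot=False, verbose=False)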
def woe_apply(S_data, DF_groups):
"""
Применение группировки и WoE-преобразования
Parameters---------------
S_data : pandas.Series
Значения предиктора
DF_groups : pandas.DataFrame
Данные о группировке предиктора
Returns
---------------
X_woe : pandas.DataFrame
WoE-преобразования значений предиктора
WoE = 0, если группа не встречалась в обучающей выборке
"""
X_woe = S_data.copy()
# Маппинги для замены групп на соответствующие значения WOE
num_map = {
DF_groups.loc[i, "groups"]: DF_groups.loc[i, "WOE"]
for i in DF_groups.index
if DF_groups.loc[i, "type"] == "num"
}
cat_map = {
DF_groups.loc[i, "groups"]: DF_groups.loc[i, "WOE"]
for i in DF_groups.index
if DF_groups.loc[i, "type"] == "cat"
}
# Категориальные группы
cat_bounds = DF_groups.loc[DF_groups["type"] == "cat", "groups"]
# predict по числовым значениям
DF_num = DF_groups.loc[DF_groups["type"] == "num"]
if DF_num.shape[0] > 0:
# Границы (правые) интервалов для разбивки числовых переменных
num_bounds = [-np.inf] + list(
            pd.IntervalIndex(DF_groups.loc[DF_groups["type"] == "num", "groups"]).right
        )
#!/usr/bin/env python
# coding: utf-8
# # The Claremont Colleges' Semester Start Timeline vs Los Angeles County COVID-19 Trends
#
# ## Semester Start Dates
# * **Fall 2020** - 24 August 2020
# * **Spring 2021** - 25 January 2021
#
# <!--## Last Update
# Tuesday, 3 November 2020 -->
#
# ## Data Sources
# * California Department of Public Health
# * [COVID-19 Cases](https://data.ca.gov/dataset/covid-19-cases/resource/926fd08f-cc91-4828-af38-bd45de97f8c3?filters=county%3ALos+Angeles)
# * [COVID-19 Hospital Data](https://data.ca.gov/dataset/covid-19-hospital-data/resource/42d33765-20fd-44b8-a978-b083b7542225?filters=county%3ALos+Angeles)
# In[ ]:
import locale
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import numpy as np
import pandas as pd
import requests
import seaborn as sns
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
plt.rcParams.update({'figure.autolayout': True})
sns.set()
CA_CASES_URL = 'https://data.ca.gov/dataset/590188d5-8545-4c93-a9a0-e230f0db7290/resource/926fd08f-cc91-4828-af38-bd45de97f8c3/download/statewide_cases.csv'
CA_CASES_CSV = 'ca_cases.csv'
CA_HOSPITALIZED_URL = 'https://data.ca.gov/dataset/529ac907-6ba1-4cb7-9aae-8966fc96aeef/resource/42d33765-20fd-44b8-a978-b083b7542225/download/hospitals_by_county.csv'
CA_HOSPITALIZED_CSV = 'ca_hospitalized.csv'
COUNTY = 'county'
DATE = 'date'
NEW_CASES = 'newcountconfirmed'
LOS_ANGELES = 'Los Angeles'
NEW_CASES_AVG = 'New Cases, 14 day average'
HOSPITALIZED_CONFIRMED_AVG = 'Hospitalized - Confirmed, 3 day average'
HOSPITALIZED_ALL_AVG = 'Hospitalized - Confirmed and Suspected, 3 day average'
SEMESTER = 'Semester'
DAYS_UNTIL_SEMESTER = 'Days Until Semester Start'
CASE_ROLLING_WINDOW = 14
NEW_CASES_AVG = 'New Cases, {} day average'.format(CASE_ROLLING_WINDOW)
FALL_2020 = 'Fall 2020'
FALL_2020_START = pd.Timestamp('2020-08-24')
FALL_2020_COLOR = sns.color_palette()[0]
SPRING_2021 = 'Spring 2021'
SPRING_2021_START = pd.Timestamp('2021-01-25')
SPRING_2021_COLOR = sns.color_palette()[1]
X_AXIS_LABEL = 'Date (Fall 2020 timeline, Spring 2021 timeline)'
def fetch_ca_dataset(url, output_csv):
r = requests.get(url)
if r.status_code == 200:
with open(output_csv, 'w') as f:
f.write(r.text)
else:
raise ConnectionError('HTTP code not 200')
def days_until_start(row: pd.Series) -> int:
if row[SEMESTER] == FALL_2020:
return (FALL_2020_START - row[DATE]).days
elif row[SEMESTER] == SPRING_2021:
return (SPRING_2021_START - row[DATE]).days
def date_axis_text(x, pos):
td = pd.Timedelta(x, 'days')
fall_equiv, spring_equiv = [
(semester-td).strftime('%b %d') for semester in (FALL_2020_START, SPRING_2021_START)]
return ('{}\n{}'.format(fall_equiv, spring_equiv))
def chart_upper_bound(dep_var_series, tick_step, buffer):
ticks_needed = (dep_var_series.max() + tick_step) // tick_step
return int(tick_step * ticks_needed + buffer)
def chart_lower_bound(upper_bound, ratio, top_value):
return (ratio * upper_bound - top_value) / (ratio - 1)
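# Illustrative check of the two chart-bound helpers above (numbers are made up):
# chart_upper_bound(pd.Series([3450.0]), tick_step=1500, buffer=200) returns 4700,
# and chart_lower_bound(4700, ratio=0.2, top_value=600) returns roughly -425.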
# In[ ]:
fetch_ca_dataset(CA_CASES_URL, CA_CASES_CSV)
fetch_ca_dataset(CA_HOSPITALIZED_URL, CA_HOSPITALIZED_CSV)
# In[ ]:
df_cases = pd.read_csv(CA_CASES_CSV)
la_cases = df_cases.loc[
df_cases[COUNTY]==LOS_ANGELES
].drop(columns=COUNTY).reset_index(drop=True).copy()
la_cases[DATE] = pd.to_datetime(la_cases[DATE])
# Forward fill new cases for negative new cases day.
la_cases.loc[198, NEW_CASES] = pd.NA
la_cases[NEW_CASES].ffill(inplace=True)
la_cases[NEW_CASES_AVG] = la_cases.loc[:, NEW_CASES].rolling(CASE_ROLLING_WINDOW).mean()
df_hospitalized = pd.read_csv(CA_HOSPITALIZED_CSV).rename(columns={'todays_date': DATE})
la_hospitalized = df_hospitalized.loc[
df_hospitalized[COUNTY]==LOS_ANGELES].drop(columns=COUNTY).reset_index(drop=True)
la_hospitalized.loc[:, DATE] = pd.to_datetime(la_hospitalized.loc[:, DATE])
daily_average = (
('hospitalized_covid_confirmed_patients', HOSPITALIZED_CONFIRMED_AVG),
('hospitalized_covid_patients', HOSPITALIZED_ALL_AVG),
)
for col_day, col_avg in daily_average:
la_hospitalized[col_avg] = la_hospitalized[col_day].rolling(3).mean().round(1)
df_la = pd.merge(la_cases, la_hospitalized, on=DATE).reset_index(drop=True)
df_la[SEMESTER] = df_la.loc[:, DATE].apply(
lambda x: FALL_2020 if x <= FALL_2020_START else SPRING_2021)
df_la[DAYS_UNTIL_SEMESTER] = df_la.apply(days_until_start, 'columns')
df_la = df_la.loc[:, (DATE, SEMESTER, DAYS_UNTIL_SEMESTER,
NEW_CASES_AVG, HOSPITALIZED_CONFIRMED_AVG, HOSPITALIZED_ALL_AVG)]
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 5.5), dpi=300)
rate_multiplier = (10_257_557 / 1e5) / 0.500
substantial_rate, moderate_rate = [rate_multiplier * x for x in (7, 4)]
widespread_color = '#802f67'
substantial_color = '#c43d53'
moderate_color = '#d97641'
widespread_message = 'Closed for in-person lectures'
substantial_message, moderate_message = [
'Lecture capacity limited to {}%'.format(x) for x in (25, 50)]
vertical_pad = 100
horizontal_pad = 5
alpha = 0.75
ax.text(horizontal_pad, substantial_rate+vertical_pad, widespread_message,
ha='right', color=widespread_color, alpha=alpha)
ax.axhline(substantial_rate, color=substantial_color, linestyle='dashed', alpha=alpha)
ax.text(horizontal_pad, substantial_rate-vertical_pad, substantial_message,
ha='right', va='top', color=substantial_color, alpha=alpha)
# ax.axhline(moderate_rate, color=moderate_color, linestyle='dashed', alpha=alpha)
# ax.text(horizontal_pad, moderate_rate-vertical_pad, moderate_message,
# ha='right', va='top', color=moderate_color, alpha=alpha)
ax.set_title('Los Angeles County COVID-19 Transmission before TCC Semester')
sns.lineplot(x=DAYS_UNTIL_SEMESTER, y=NEW_CASES_AVG, hue=SEMESTER, data=df_la, ax=ax)
tick_step = 1500
y_max = chart_upper_bound(df_la[NEW_CASES_AVG], tick_step, 200)
ax.set_yticks(list(range(0, y_max, tick_step)))
ax.set_yticklabels([f'{int(x):n}' if x%3_000==0 else '' for x in ax.get_yticks()])
ax.set_xlabel(X_AXIS_LABEL)
ax.set_ylabel(NEW_CASES_AVG)
ax.set_xlim(120, 0)
ax.xaxis.set_major_formatter(FuncFormatter(date_axis_text))
# ax.set_ylim(moderate_rate-vertical_pad-250, df_la[NEW_CASES_AVG].max()+100)
ax.set_ylim(0, y_max)
ax.legend(loc='upper left', title=SEMESTER)
fig.savefig('docs/semester-start-v-new-cases.png')
fig.show()
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 5), dpi=300)
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_ALL_AVG, 'b--', label='Fall 2020, Confirmed & Suspected',
data=df_la[df_la[SEMESTER] == FALL_2020])
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_CONFIRMED_AVG, 'b-', label='Fall 2020, Confirmed',
data=df_la[df_la[SEMESTER] == FALL_2020])
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_ALL_AVG, '--', color=sns.color_palette()[1],
label='Spring 2021, Confirmed & Suspected', data=df_la[df_la[SEMESTER] == SPRING_2021])
ax.plot(DAYS_UNTIL_SEMESTER, HOSPITALIZED_CONFIRMED_AVG, color=sns.color_palette()[1], label='Spring 2021, Confirmed',
data=df_la[df_la[SEMESTER] == SPRING_2021])
tick_step = 1000
y_max = chart_upper_bound(df_la[HOSPITALIZED_ALL_AVG], tick_step, 200)
ax.set_yticks(list(range(0, y_max, tick_step)))
ax.set_yticklabels([f'{int(x):n}' if x%2_000==0 else '' for x in ax.get_yticks()])
ax.set_xlabel(X_AXIS_LABEL)
ax.xaxis.set_major_formatter(FuncFormatter(date_axis_text))
ax.set_ylabel('Hospitalized, 3 day average')
ax.set_title('Los Angeles County COVID-19 Hospital Patients before TCC Semester')
ax.set_xlim(120, 0)
legend_top = 600
# ax.axhline(legend_top, color='k')
ax.set_ylim(chart_lower_bound(y_max, .2, legend_top), y_max)
ax.legend(title='Semester, Patient COVID-19 Diagnosis', loc='lower right',
ncol=2, fontsize='small', title_fontsize='small')
fig.savefig('docs/semester-start-v-hospitalized.png')
fig.show()
# In[ ]:
LACDPH_CSV = 'lacdph.csv'
r = requests.get('https://github.com/amhirsch/lac_covid19/raw/master/docs/time-series/aggregate-ts.csv')
if r.status_code == 200:
with open(LACDPH_CSV, 'w') as f:
f.write(r.text)
else:
raise ConnectionError('LACDPH Time Series Unavailable')
# In[ ]:
df_lacdph = pd.read_csv(LACDPH_CSV)
df_lacdph[DATE] = pd.to_datetime(df_lacdph['Date'])
import codecs
import json
import warnings
import pandas as pd
from transformers import (AutoModelForMaskedLM,
AutoTokenizer, LineByLineTextDataset,
DataCollatorForLanguageModeling,
Trainer, TrainingArguments)
warnings.filterwarnings('ignore')
def get_task_data(data_path):
with codecs.open(data_path, mode='r', encoding='utf8') as f:
        reader = f.readlines()
data_list = []
for dialogue_ in reader:
dialogue_content = []
dialogue_ = json.loads(dialogue_)
_dialog_id = dialogue_['dialog_id']
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
dialogue_content.append(contents_['sender'] + ':' + contents_['text'])
data_list.append(';'.join(dialogue_content))
    return pd.DataFrame(data_list, columns=['text'])
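# Sketch of how the imports above are typically wired together for masked-LM
# fine-tuning. The model name, file path and hyperparameters below are
# placeholders, not values taken from the original project; the text column
# produced by get_task_data would first be written to a plain-text file.
def build_mlm_trainer(train_txt_path="train.txt", model_name="bert-base-chinese"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForMaskedLM.from_pretrained(model_name)
    dataset = LineByLineTextDataset(
        tokenizer=tokenizer, file_path=train_txt_path, block_size=128
    )
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=True, mlm_probability=0.15
    )
    training_args = TrainingArguments(
        output_dir="./mlm_output",
        num_train_epochs=1,
        per_device_train_batch_size=8,
    )
    return Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=dataset,
    )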
# -*- coding: utf-8 -*-
"""Road-Friction-Forecasting.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1W15eOQbeHp9wbJWRaE0f7ZfYv_jAj14O
# Authors:
**<NAME>**
* LinkedIn: https://www.linkedin.com/in/md-abrar-jahin-9a026018b
* Facebook: https://www.facebook.com/
* Github: https://github.com/Abrar2652
* email: <EMAIL>
**<NAME>**
* Website: https://krutsylo.neocities.org
* email: <EMAIL>
# Import Libraries and Packages
After importing libraries and packages, we start off by defining a function `transform_to_supervised` that creates the desired **lag** *(24 hours in this case)* and **forecasting** features *(1 hour)* of our independent variables, concatenates them with the dataframe and returns the final dataframe.
"""
import os
import optuna
import pickle
import pandas as pd
from optuna import Trial
from optuna.samplers import TPESampler
from sklearn.impute import KNNImputer
from sklearn.model_selection import StratifiedKFold, cross_val_score
from xgboost import XGBClassifier, XGBRegressor
from matplotlib import pyplot as plt
from sklearn.metrics import mean_absolute_error, accuracy_score, balanced_accuracy_score
import numpy as np
def transform_to_supervised(df,
previous_steps=1,
forecast_steps=1,
dropnan=False):
"""
https://gist.github.com/monocongo/6e0df19c9dd845f3f465a9a6ccfcef37
Transforms a DataFrame containing time series data into a DataFrame
containing data suitable for use as a supervised learning problem.
Derived from code originally found at
https://machinelearningmastery.com/convert-time-series-supervised-learning-problem-python/
:param df: pandas DataFrame object containing columns of time series values
:param previous_steps: the number of previous steps that will be included in the
output DataFrame corresponding to each input column
:param forecast_steps: the number of forecast steps that will be included in the
output DataFrame corresponding to each input column
:return Pandas DataFrame containing original columns, renamed <orig_name>(t), as well as
columns for previous steps, <orig_name>(t-1) ... <orig_name>(t-n) and columns
for forecast steps, <orig_name>(t+1) ... <orig_name>(t+n)
"""
# original column names
col_names = df.columns
# list of columns and corresponding names we'll build from
# the originals found in the input DataFrame
cols, names = list(), list()
# input sequence (t-n, ... t-1)
# Lag features
for i in range(previous_steps, 0, -1):
cols.append(df.shift(i))
names += [('%s(t-%d)' % (col_name, i)) for col_name in col_names]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, forecast_steps):
cols.append(df.shift(-i))
if i == 0:
names += [('%s(t)' % col_name) for col_name in col_names]
else:
names += [('%s(t+%d)' % (col_name, i)) for col_name in col_names]
# put all the columns together into a single aggregated DataFrame
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
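# Example sketch: lag/forecast features for a tiny synthetic series, to show the
# column layout transform_to_supervised produces. The notebook itself uses a
# 24-step lag and a 1-step forecast horizon on the smart-road data.
_demo_supervised = transform_to_supervised(
    pd.DataFrame({"t_air": [1.0, 2.0, 3.0, 4.0]}),
    previous_steps=2,
    forecast_steps=1,
    dropnan=True,
)
# -> columns: "t_air(t-2)", "t_air(t-1)", "t_air(t)"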
"""# Data Collection and Import data
The dataset has been collected from the **Smart Road - Winter Road Maintenance Challenge 2021** organized by *UiT The Arctic University of Norway* on Devpost.
Dataset download link: https://uitno.app.box.com/s/bch09z27weq0wpcv8dbbc18sxz6cycjt
After downloading the `smart_road_measurements.csv` file from the competition page, we added extra columns using data collected from the external resources authorized by the organizers. The links to the external datasets are:
[1] Weather data https://pypi.org/project/wwo-hist/
[2] UV Index data https://pyowm.readthedocs.io/en/latest/v3/uv-api-usage-examples.html
After merging these 3 files together based on the same dates, we finalized our main dataset `smart_road_measurements_new_d_weather.csv` on top of which we will build our model after preprocessing.
"""
df = pd.read_csv("/content/smart_road_measurements_new_d_weather.csv", header=0)
df2 = df.copy()
df.head(15)
"""# Exploratory Data Analysis
Our dataset contains 349613 rows and 29 columns
"""
df.shape
df.info()
import numpy as np
np.random.seed(0)
import seaborn as sns
sns.set_theme()
_ = sns.heatmap(df2.iloc[:,2:11].corr())
_ = sns.heatmap(df2.corr())
"""We want to predict Friction of the road by weather conditions. So,
this is a classification task. Every day the car drives on a new route.
This means that all 11 days we receive data on new road sections. So, the
only link between the road sections is the average weather conditions.
This can be achieved by filtering the rows in **Microsoft Excel** for each date and getting the total distance covered (the last row on each date, because the column is cumulative in nature)
**Max Distance traveled, Date**
42441, 16/02/2021
92311, 17/02/2021
150216, 18/02/2021
39007, 19/02/2021
71358, 22/02/2021
81999, 23/02/2021
55958, 24/02/2021
77315, 25/02/2021
55647, 26/02/2021
61534, 1/03/2021
12409, 2/03/2021
**Therefore, we can see from the above data that for all 11 days the car was driving at different routes**
* We drop the `Distance` because the condition of the road does not depend on how much the car has traveled before. We use this column to get the speed and slope of the road.
* This means that we are using normalized data + lag (time-series
classification with engineered features instead of time-series
classification with deep learning, because we have shallow data).
We won't focus on any complicated models, just XGBClassifier to win.
* Now we need to define at what Friction the road is dangerous (label 0),
requires caution (label-1) and safe (label-2).
Ta, Tsurf, friction are **highly correlated** which has been shown in our pandas profiling
https://krutsylo.neocities.org/SmartRoads/pandas3.html of the smart road dataset.
Yet we'll drop State, Height, Distance, Ta, Tsurf, Water, moon-illumination, uvIndex columns
"""
df = df.drop("Height", axis=1) # contain N/A
df = df.drop("Distance", axis=1)
df = df.drop("State", axis=1)
df = df.drop("Ta", axis=1)
df = df.drop("Tsurf", axis=1)
df = df.drop("Water", axis=1)
df = df.drop("moon_illumination", axis=1)
df = df.drop("uvIndex", axis=1)
df.head()
""" We have grouped the data by calculating the mean of the rows in each hour based on the individual dates. For instance, if there are 8 rows for each hour, we calculated the mean of 8 rows and thus converted into a single row belonging to the distinct dates.
We also avoided duplicates to reduce the noise in the data.
"""
df['Time(+01:00)'] = pd.to_datetime(df['Time(+01:00)'], format='%H:%M:%S')
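# A minimal sketch of the hourly averaging described above, assuming a "Date"
# column alongside 'Time(+01:00)' (kept as a comment because the column name is
# an assumption):
# df = (
#     df.groupby(["Date", df["Time(+01:00)"].dt.hour])
#     .mean(numeric_only=True)
#     .reset_index()
#     .drop_duplicates()
# )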
import pandas as pd
import copy
import numpy as np
import re
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
class CleanText(BaseEstimator, TransformerMixin):
def __init__(self, feature_name):
self.feature_name = feature_name
self.values = None
def clean_text(self, value):
value_ = copy.copy(value).lower()
value_ = value_.replace(" ", "")
value_ = value_.replace("\n", "")
value_ = value_.replace("\t", "")
if self.feature_name == "InterventionLocationName":
if ("at" in value) and (value_.replace("at", "@").split("@")[0] in self.values):
value_ = value_.replace("at", "@")
if "@" in value_:
value_ = value_.split("@")[0]
if "/" in value_:
value_ = value_.split("/")[0]
value_ = re.sub("[\W]", "", value_)
value_ = self.clean_intervention_location_name(value_)
return value_
elif self.feature_name == "Department Name":
value_ = value_.replace("@", "at")
value_ = re.sub("[\W]", "", value_)
value_ = self.clean_department_name(value_)
return value_
else:
value_ = re.sub("[\W]", "", value_)
return value_
def clean_intervention_location_name(self, value):
value_ = copy.copy(value)
if value_ in ["", "none", "na", "nan"]:
return None
elif "unknown" in value_:
return None
elif value_ in ["othertown", "ve", "venue", "yourcity", "st", "street", "dr", "drive", "ville"]:
return "others"
elif value_[-6:] == "street":
return value_[:-6]
elif value_[-2:] == "st":
return value_[:-2]
elif value_[-5:] == "drive":
return value_[:-5]
elif value_[-2:] == "dr":
return value_[:-2]
elif value_[-5:] == "ville":
return value_[:-5]
elif value_[-5:] == "venue":
return value_[:-5]
elif value_[-2:] == "ve":
return value_[:-2]
else:
return value_
def clean_department_name(self, value):
value_ = copy.copy(value)
if "police" in value_:
return value_.replace("police", "")
else:
return value_
def split_text(self, value):
value_ = copy.copy(value)
value_ = value_.lower().replace(" ", "")
if "@" in value_:
value_ = value_.split("@")[0]
if "/" in value_:
value_ = value_.split("/")[0]
return value_
def fit(self, X, y=None):
X_ = copy.copy(X)
self.values = X_[self.feature_name].astype("str").apply(self.split_text).unique()
return self
def transform(self, X):
X_ = copy.copy(X)
X_[self.feature_name] = X_[self.feature_name].astype("str")
X_[self.feature_name] = X_[self.feature_name].apply(lambda x: self.clean_text(x))
return X_
class SetOthers(BaseEstimator, TransformerMixin):
def __init__(self, feature_name, threshold):
self.feature_name = feature_name
self.threshold = threshold
self.values = None
def get_values(self, X):
number_ocurrencies = X[self.feature_name].value_counts()
values = number_ocurrencies[number_ocurrencies > self.threshold].index
return values
def clean_value(self, value):
if value in self.values:
return value
else:
return "other"
def fit(self, X, y=None):
self.values = self.get_values(X)
return self
def transform(self, X):
X_ = copy.copy(X)
X_[self.feature_name] = X_[self.feature_name].astype("str")
X_[self.feature_name] = X_[self.feature_name].apply(lambda x: self.clean_value(x))
return X_
class TimeFeatures(BaseEstimator, TransformerMixin):
def __init__(self, feature_name, time_format="%m/%d/%Y %H:%M:%S", month=True, weekday=True, hour=True):
self.feature_name = feature_name
self.time_format = time_format
        self.month = bool(month)
        self.weekday = bool(weekday)
        self.hour = bool(hour)
    def clean_timestamp(self, value):
        # Convert a 'MM/DD/YYYY HH:MM:SS AM/PM' string to 24-hour form without the suffix.
        if value[-2:] == "AM" and value[11:13] == "12":
            # 12 AM is midnight: replace the hour with 00 and drop the suffix
            return value[:11] + "00" + value[13:-3]
        elif value[-2:] == "AM":
            # any other AM time: just drop the suffix
            return value[:-3]
        elif value[-2:] == "PM" and value[11:13] == "12":
            # 12 PM is noon: keep the hour, drop the suffix
            return value[:-3]
        else:
            # remaining PM times: add 12 to the hour and drop the suffix
            return value[:11] + str(int(value[11:13]) + 12) + value[13:-3]
def create_features(self, X):
X_ = copy.copy(X)
if self.weekday == True:
X_["weekday"] = X_[self.feature_name].dt.weekday
if self.hour == True:
X_["hour"] = X_[self.feature_name].dt.hour
if self.month == True:
X_["month"] = X_[self.feature_name].dt.month
return X_
def create_cyclical_feature(self, X, name, period):
X_ = copy.copy(X)
X_['sin_' + name] = np.sin(2 * np.pi * X_[name] / period)
X_['cos_' + name] = np.cos(2 * np.pi * X_[name] / period)
X_ = X_.drop(columns=[name])
return X_
def fit(self, X, y=None):
return self
def transform(self, X):
X_ = copy.copy(X)
X_[self.feature_name] = X_[self.feature_name].apply(self.clean_timestamp)
X_[self.feature_name] = X_[self.feature_name].apply(lambda x: | pd.to_datetime(x, format=self.time_format) | pandas.to_datetime |
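# Illustrative sketch (not part of the original module): one way the three
# transformers above might be chained. 'InterventionDateTime' and the threshold of
# 50 are assumptions made purely for illustration; the two text column names appear
# in CleanText itself.
preprocessing = Pipeline(steps=[
    ("clean_location", CleanText(feature_name="InterventionLocationName")),
    ("clean_department", CleanText(feature_name="Department Name")),
    ("group_rare", SetOthers(feature_name="InterventionLocationName", threshold=50)),
    ("time_features", TimeFeatures(feature_name="InterventionDateTime")),
])
# cleaned = preprocessing.fit_transform(raw_df)  # raw_df: the unprocessed dataframe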
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = | pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) | pandas.date_range |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare stations metadata."""
# pylint: disable=invalid-name
from io import BytesIO
from typing import Dict, List
from urllib.request import urlopen
from zipfile import ZipFile
import geopandas as gpd
import pandas as pd
import pandera as pa
import requests
ch_essentials_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int),
"NAME": pa.Column(pd.StringDtype()),
"POI_LATITUDE": pa.Column(
pa.Float64,
nullable=True,
),
"POI_LONGITUDE": pa.Column(
pa.Float64,
nullable=True,
),
},
index=pa.Index(pa.Int),
)
poi_schema = pa.DataFrameSchema(
columns={
"ID": pa.Column(pa.Int, unique=True),
"ADDRESS_INFO": pa.Column(pd.StringDtype()),
"NAME": pa.Column(pd.StringDtype(), unique=True),
"CATEGORY": pa.Column(pd.StringDtype()),
"PHONE": pa.Column(pd.StringDtype()),
"EMAIL": pa.Column(pd.StringDtype()),
"WEBSITE": pa.Column(pd.StringDtype()),
"GEOID": pa.Column(pa.Float, nullable=True),
"RECEIVED_DATE": pa.Column(pd.StringDtype()),
"ADDRESS_POINT_ID": pa.Column(pa.Float, nullable=True),
"LINEAR_NAME_FULL": pa.Column(pd.StringDtype()),
"ADDRESS_FULL": pa.Column(pd.StringDtype()),
"POSTAL_CODE": pa.Column(pd.StringDtype()),
"MUNICIPALITY": pa.Column(pd.StringDtype()),
"CITY": pa.Column(pd.StringDtype()),
"PLACE_NAME": pa.Column(pd.StringDtype()),
"GENERAL_USE_CODE": pa.Column(pa.Float, nullable=True),
"CENTRELINE": pa.Column(pa.Float, nullable=True),
"LO_NUM": pa.Column(pa.Float, nullable=True),
"LO_NUM_SUF": pa.Column(pd.StringDtype()),
"HI_NUM": pa.Column(pd.StringDtype()),
"HI_NUM_SUF": pa.Column(pd.StringDtype()),
"LINEAR_NAME_ID": pa.Column(pa.Float, nullable=True),
"WARD": pa.Column( | pd.StringDtype() | pandas.StringDtype |
import unittest
import pandas as pd
import calendar
import time
from application.model_functions import *
class Testing(unittest.TestCase):
def test_isempty(self):
df1 = pd.DataFrame()
self.assertTrue(isempty_df(df1))
df2 = pd.DataFrame([[0,'abcd',0,1,123]],columns=['a','b','c','d','e'])
self.assertFalse(isempty_df(df2))
def test_convert_to_epoch(self):
#TODO: REVIEW FUNCTION
pass
# df1 = pd.DataFrame([["Wednesday, 27-Jul-16 11:37:51 UTC"]],columns=['time'])
# df1 = convert_to_epoch(df1, "time")
# df2 = pd.DataFrame([[1469619471]],columns=['time'])
# self.assertEqual(df1['time'][0], df2['time'][0])
def room_number(self):
df1 = pd.DataFrame([['B002']],columns=['room'])
df2 = pd.DataFrame([['B106']],columns=['room'])
df3 = pd.DataFrame([['SC208W']],columns=['room'])
self.assertEqual(df1['room'][0], 2)
self.assertEqual(df2['room'][0], 106)
self.assertEqual(df3['room'][0], 208)
def estimate_occ(self):
df1 = | pd.DataFrame([[2, 0],[3, 0.5],[4, 1]], columns=['room', 'occupancy_rate']) | pandas.DataFrame |
import pandas as pd
from django.db.models import Count
from dateutil.parser import parse
from dateutil.tz import gettz
from django.conf import settings
class ModelResource:
"""
Model resources:
Exports queryset of model into excel and csv file
And import excel or csv file data into model
"""
def __init__(self, queryset=None):
"""
Initializing model resource
Input: queryset (not required)
"""
self.__list = None
        if queryset is not None:
self.__process_queryset(queryset)
del queryset
def __get_fields(self):
"""
-- used for IMPORT EXPORT --
Returns list of fields for resource
"""
has_fields = False
fields = []
# fields
if hasattr(self.Meta, 'fields'):
# fields array
if type(self.Meta.fields) == list:
fields = self.Meta.fields
has_fields = True
# all fields
elif self.Meta.fields == '__all__':
for model_field in self.Meta.model._meta.fields:
fields.append(model_field.name)
for model_field in self.Meta.model._meta.many_to_many:
fields.append(model_field.name)
has_fields = True
# exlude fields
elif hasattr(self.Meta, 'exclude'):
if type(self.Meta.exclude) == list:
for model_field in self.Meta.model._meta.fields:
if not model_field.name in self.Meta.exclude:
fields.append(model_field.name)
for model_field in self.Meta.model._meta.many_to_many:
if not model_field.name in self.Meta.exclude:
fields.append(model_field.name)
has_fields = True
if not has_fields:
raise Exception('fields is required')
if 'id' not in fields:
fields.insert(0, 'id')
for field in fields:
field_type = self.Meta.model._meta.get_field(field).get_internal_type()
if field_type == 'OneToOneField':
if not hasattr(self, field):
setattr(self, field, OneToOneResource())
elif field_type == 'ForeignKey':
if not hasattr(self, field):
setattr(self, field, ForeignKeyResource())
elif field_type == 'ManyToManyField':
if not hasattr(self, field):
setattr(self, field, ManyToManyResource())
return fields
def __get_database_fields(self):
"""
-- used for IMPORT EXPORT --
Returns list of database fields (normal, foreign, related, m2m) for values
"""
fields = self.__get_fields() # getting all fields
db_fields = list() # database list to be appended
temp_attr = None
for field in fields:
if hasattr(self, field): # checking if field has attribute (special field)
temp_attr = getattr(self, field)
if type(temp_attr) == OneToOneResource:
db_fields.append([field, 'o2o'])
elif type(temp_attr) == ForeignKeyResource:
db_fields.append([field, 'foreign'])
elif type(temp_attr) == RelatedResource:
db_fields.append([field, 'related'])
elif type(temp_attr) == ManyToManyResource:
db_fields.append([field, 'm2m'])
else:
db_fields.append([field, 'normal'])
return db_fields
def __process_queryset(self, queryset):
"""
-- used for EXPORT --
Process queryset into dictionary list that is to be saved
"""
        db_fields = self.__get_database_fields()  # resource fields categorised by their db relation type
        self.__fields_values = list()  # field names passed to .values(), renamed later when building the dataframe
        self.__rename_values = dict()  # foreign-key renames, e.g. 'project__column' -> 'project'
        annotate_dict = dict()  # queryset annotations (Count) for m2m and related fields
        queryset_values = list()  # plain field names passed to .values()
for db_field in db_fields:
if db_field[1] == 'normal':
# normal field (just add to queryset_values and fields_values)
self.__fields_values.append( db_field[0] )
queryset_values.append( db_field[0] )
elif db_field[1] == 'foreign' or db_field[1] == 'o2o':
# foreignkey field ( add to queryset_values, fields_values and rename_values)
self.__fields_values.append( db_field[0] + '__' + getattr(self, db_field[0]).column )
queryset_values.append( db_field[0] )
self.__rename_values[db_field[0] + '__' + getattr(self, db_field[0]).column ] = db_field[0]
else:
# m2m and related fields ( add to fields_values, postprocess_fields and annotate_dict to take just count as its column)
self.__fields_values.append(db_field[0])
annotate_dict[db_field[0]] = Count(db_field[0])
# gettings values from queryset
self.__list = list(queryset.order_by('id').values(*queryset_values).annotate(**annotate_dict).values(*self.__fields_values))
# appending one to many fields to list
o2m_field = self.__get_o2m_field(db_fields, queryset)
for i in range(0, len(self.__list)):
for field in o2m_field[self.__list[i]['id']]:
self.__list[i][field] = o2m_field[self.__list[i]['id']][field]
del queryset
def __get_o2m_field(self, db_fields, queryset):
"""
-- used for Export --
returns o2m and related queryset
"""
postprocess_fields = [ db_field[0] for db_field in db_fields if db_field[1] == 'related' or db_field[1] == 'm2m' ]
o2m_field = dict()
for obj in queryset:
o2m_field[obj.id] = dict()
for field in postprocess_fields:
texts = [ str(getattr(m2m_obj, getattr(self, field).column)) for m2m_obj in getattr(obj, field).all() ]
o2m_field[obj.id][field] = ",".join(texts)
return o2m_field
def __check_queryset(self):
"""
-- used for EXPORT --
Checks if queryset is available or not before exporting
"""
        if self.__list is None:
raise Exception('Cannot export without queryset.')
def __get_dataframe(self):
"""
-- used for EXPORT --
returns dataframe from the list
"""
self.__check_queryset()
df = pd.DataFrame(self.__list, columns=self.__fields_values)
df=df.rename(columns = self.__rename_values)
# converting datetimes to Local time
db_fields = self.__get_database_fields()
normal_fields = [ db_field[0] for db_field in db_fields if db_field[1] == 'normal']
for field in normal_fields:
field_class = self.Meta.model._meta.get_field(field).get_internal_type()
if field_class == 'DateTimeField':
df[field] = pd.to_datetime(df[field], unit='s')
try:
setattr(df, field, getattr(df, field).dt.tz_localize('UTC'))
except:
pass
setattr(df, field, getattr(df, field).dt.tz_convert(settings.TIME_ZONE) )
df[field] = df[field].apply(lambda x: '' if pd.isnull(x) else str(x))
elif field_class == 'DateField' or field_class == 'TimeField':
df[field] = df[field].apply(lambda x: '' if pd.isnull(x) else str(x))
return df
def to_excel(self, file_name):
"""
saves queryset to respective excel file
"""
self.__get_dataframe().to_excel(file_name, index=False)
def to_csv(self, file_name):
"""
saves queryset to respective csv file
"""
self.__get_dataframe().to_csv(file_name, index=False)
def __set_dataframe(self, df):
"""
-- used for EXPORT --
returns dataframe from the list
"""
self.__list = list(df.T.to_dict().values())
def __save_list(self, df):
"""
saves list of dictionaries to model (if id is empty creates as new object)
"""
update_df = df.loc[ pd.notnull(df['id']) ] # row that contain id
self.__update_list(update_df)
new_df = df.loc[ pd.isnull(df['id']) ] # row that doesn't contain id (considered as new object)
self.__create_list(new_df)
def __update_list(self, df):
"""
updates update_df list to model
"""
db_fields = self.__get_database_fields()
for row in list(df.T.to_dict().values()):
row_id = 0
self_kwargs = dict() # dictionary arguments passed to model object { 'field_name': 'Field_value'}
m2m_fields = list() # m2m list carrying [ ['field_name', 'field_value'], ... ]
for field in db_fields:
# converting NaN and NaT to None
if | pd.isnull(row[field[0]]) | pandas.isnull |
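# Illustrative sketch (an assumption, not part of the original module): how a
# concrete resource might be declared and used. 'Book' and its field names are
# placeholders, not real models from this project.
#
# class BookResource(ModelResource):
#     class Meta:
#         model = Book
#         fields = ['id', 'title', 'authors']   # or fields = '__all__' / exclude = [...]
#
# BookResource(Book.objects.all()).to_excel('books.xlsx')
# BookResource(Book.objects.all()).to_csv('books.csv')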
"""Compile 1/f-exponents across sessions for further computation in R."""
import os
import pandas as pd
import numpy as np
df = pd.read_csv('../csv/sessions.csv')
fmin = 1
fmax = 10
rsquare_threshold = 0.95
exp_folder = '../results/exponents/'
dfs = []
nr_segments = []
for subject in df.subject_id:
df_file_name = '%s/%s_exponents.csv' % (exp_folder, subject)
df_exp = pd.read_csv(df_file_name)
nr_segments.append(len(df_exp))
df_file_name = '%s/%s_rsquare.csv' % (exp_folder, subject)
df_r = | pd.read_csv(df_file_name) | pandas.read_csv |
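# Illustrative sketch (an assumption, not the original continuation of this loop):
# one plausible use of rsquare_threshold is to mask exponents whose fit quality is
# below it before pooling, e.g.:
# df_exp[df_r < rsquare_threshold] = np.nan
# dfs.append(df_exp)
# The alignment of df_exp and df_r columns assumed here is hypothetical.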
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: | pd.Timestamp("2013-03-11 00:00:00") | pandas.Timestamp |
#!/usr/bin/env python
"""
Module implementing the Data class that manages data for
it's associated PandasTable.
Created Jan 2014
Copyright (C) <NAME>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from types import *
import operator
import os, string, types, copy
import pickle
import numpy as np
import pandas as pd
from . import util
class TableModel(object):
"""A data model for the Table class that uses pandas
Args:
dataframe: pandas dataframe
rows: number of rows if empty table
columns: number of columns if empty table
"""
keywords = {'colors':'colors'}
def __init__(self, dataframe=None, rows=20, columns=5):
"""Constructor for table model. """
self.initialiseFields()
self.setup(dataframe, rows, columns)
return
def setup(self, dataframe, rows=20, columns=5):
"""Create table model"""
if not dataframe is None:
self.df = dataframe
else:
colnames = list(string.ascii_lowercase[:columns])
self.df = pd.DataFrame(index=range(rows),columns=colnames)
#self.df = self.getSampleData()
#self.reclist = self.df.index # not needed now?
return
@classmethod
def getSampleData(self, rows=400, cols=5, n=2):
"""Generate sample data
Args:
rows: no. of rows
cols: columns
n: length of column names
"""
import random
s = string.ascii_lowercase
def genstr(n=2):
return ''.join(random.choice(s) for i in range(n))
maxrows = 5e6
if rows>maxrows:
rows=maxrows
if cols>1e5:
cols=int(1e5)
n=2
if cols>100: n=3
colnames = [genstr(n) for i in range(cols)]
coldata = [np.random.normal(x,1,rows) for x in np.random.normal(5,3,cols)]
n = np.array(coldata).T
df = pd.DataFrame(n, columns=colnames)
col1 = colnames[0]
col2 = colnames[1]
df[col2] = df[col1]*np.random.normal(.8, .2, len(df))
df = np.round(df, 3)
cats = ['low','medium','high','very high']
df['label'] = pd.cut(df[col1], bins=4, labels=cats).astype(str)
#df['label'] = df.label.cat.as_ordered()
#don't add date if rows too large
if rows<2e6:
df['date'] = pd.date_range('1/1/2016', periods=rows, freq='H')
return df
@classmethod
def getIrisData(self):
"""Get iris dataset"""
path = os.path.dirname(__file__)
cols = ['sepal length','sepal width','petal length','petal width','class']
df = pd.read_csv(os.path.join(path,'datasets','iris.data'),names=cols)
return df
@classmethod
def getStackedData(self):
"""Get a dataframe to pivot test"""
import pandas.util.testing as tm; tm.N = 4
frame = tm.makeTimeDataFrame()
N, K = frame.shape
data = {'value' : frame.values.ravel('F'),
'variable' : np.asarray(frame.columns).repeat(N),
'date' : np.tile(np.asarray(frame.index), K)}
return pd.DataFrame(data, columns=['date', 'variable', 'value'])
def initialiseFields(self):
"""Create meta data fields"""
self.meta = {}
self.columnwidths = {} #used to store col widths
return
def save(self, filename):
"""Save dataframe"""
ftype = os.path.splitext(filename)[1]
if ftype == '.mpk':
self.df.to_msgpack(filename)
elif ftype == '.pickle':
self.df.to_pickle(filename)
elif ftype == '.xls':
self.df.to_excel(filename)
elif ftype == '.csv':
self.df.to_csv(filename)
#elif ftype == '.html':
# self.df.to_html(filename)
return
def load(self, filename, filetype=None):
"""Load file, if no filetype given assume it's msgpack format"""
if filetype == '.pickle':
self.df = pd.read_pickle(filename)
else:
self.df = pd.read_msgpack(filename)
#print (len(self.df))
return
def getlongestEntry(self, colindex, n=500):
"""Get the longest string in the column for determining width. Just uses the first
n rows for speed"""
df = self.df
col = df.columns[colindex]
try:
if df.dtypes[col] == 'float64':
c = df[col][:n].round(3)
else:
c = df[col][:n]
except:
return 1
longest = c.astype('object').astype('str').str.len().max()
if np.isnan(longest):
return 1
return longest
def getRecordAtRow(self, rowIndex):
"""Get the entire record at the specifed row"""
name = self.getRecName(rowIndex)
record = self.df.loc[name]
return record
def moveColumn(self, oldindex, newindex):
"""Changes the order of columns"""
df = self.df
cols = list(df.columns)
name = cols[oldindex]
del cols[oldindex]
cols.insert(newindex, name)
self.df = df[cols]
return
def autoAddRows(self, num):
"""Add n rows to end of dataframe. Will create rows with index starting
from highest previous row count"""
df = self.df
if len(df) == 0:
self.df = pd.DataFrame(pd.Series(range(num)))
print (df)
return
try:
ind = self.df.index.max()+1
except:
ind = len(df)+1
new = pd.DataFrame(np.nan, index=range(ind,ind+num), columns=df.columns)
self.df = pd.concat([df, new])
return
def addRow(self, rowindex):
"""Inserts a row at the required index by append/concat"""
df = self.df
a, b = df[:rowindex], df[rowindex:]
a = a.append(pd.Series(), ignore_index=True)
self.df = | pd.concat([a,b]) | pandas.concat |
from backend.lib import sql_queries
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
def test_get_user_info_for_existing_user(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id == 1
def test_get_user_info_with_wrong_password_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id is None
def test_get_user_info_with_wildcard_email_address_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='*', password='<PASSWORD>')
assert user_id is None
def test_get_user_info_with_wildcard_password_results_in_no_users_found(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='*')
assert user_id is None
def test_get_user_info_for_non_existant_user(refresh_db_once, db_connection_sqlalchemy):
engine = db_connection_sqlalchemy
user_id = sql_queries.get_user_id(engine, email='<EMAIL>', password='<PASSWORD>')
assert user_id is None
def test_count_input_data_items_for_all_users_and_label_tasks(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['user_id'] = [1, 1, 1, 1, 2, 2, 2, 3, None, None]
df_test['label_task_id'] = [1, 2, 3, 5, 1, 2, 5, 1, 4, 6]
df_test['total_items'] = [5, 3, 1, 5, 5, 2, 5, 1, 1, 1]
df_test['num_unlabeled'] = [2, 2, 1, 5, 4, 2, 5, 0, 1, 1]
df_test['num_labeled'] = [3, 1, 0, 0, 1, 0, 0, 1, 0, 0]
engine = db_connection_sqlalchemy
df = sql_queries.count_input_data_items_per_user_per_label_task(engine, label_task_id=None, user_id=None)
assert_series_equal(df['user_id'], df_test['user_id'])
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
assert_series_equal(df['total_items'], df_test['total_items'])
assert_series_equal(df['num_labeled'], df_test['num_labeled'])
def test_get_all_input_data_items(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['input_data_id'] = [1, 2, 3, 4, 5]
df_test['dataset_group_id'] = [1, 1, 1, 1, 1]
df_test['dataset_id'] = [1, 1, 2, 2, 2]
engine = db_connection_sqlalchemy
df = sql_queries.get_all_input_data_items(engine, label_task_id=1)
assert_frame_equal(df, df_test)
def test_get_all_user_input_data(refresh_db_once, db_connection_sqlalchemy):
df_test = pd.DataFrame()
df_test['label_id'] = [3, 2, 1]
df_test['input_data_id'] = [3, 2, 1]
df_test['user_id'] = [1, 1, 1]
df_test['label_task_id'] = [1, 1, 1]
engine = db_connection_sqlalchemy
df = sql_queries.get_all_user_input_data(engine, user_id=1, label_task_id=1, n=None)
assert_series_equal(df['label_id'], df_test['label_id'])
assert_series_equal(df['input_data_id'], df_test['input_data_id'])
assert_series_equal(df['user_id'], df_test['user_id'])
assert_series_equal(df['label_task_id'], df_test['label_task_id'])
def test_get_all_user_input_data_filtered(refresh_db_once, db_connection_sqlalchemy):
#filter incomplete
df_test = pd.DataFrame()
df_test['label_id'] = [1]
df_test['input_data_id'] = [1]
df_test['user_id'] = [1]
df_test['label_task_id'] = [1]
engine = db_connection_sqlalchemy
df = sql_queries.get_first_user_input_data(engine, user_id=1, label_task_id=1, label_filter = "filter_incomplete")
print("Received first incomplete entry")
assert_series_equal(df['label_id'], df_test['label_id'])
| assert_series_equal(df['input_data_id'], df_test['input_data_id']) | pandas.testing.assert_series_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
QUANDLKEY = '<Enter your Quandl API key here>'
"""
Created on Fri Oct 5 23:24:35 2018
@author: jeff
"""
'''*************************************
#1. Import libraries and define key variables
'''
import pandas as pd
import numpy as np
import quandl
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report,roc_curve, auc,confusion_matrix,f1_score
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
import pickle
import graphviz
#KPI keys
quandl.ApiConfig.api_key = QUANDLKEY
'''*************************************
#2. Definition of functions
'''
#2a.Download tickers
def download_tkr(tkr):
record_db_events_gp = pd.DataFrame()
record_db_financials=quandl.get_table('SHARADAR/SF1', calendardate={'gte': '2008-12-31'}, ticker=tkr, dimension='MRY')
record_db_financials['year'] = record_db_financials['reportperiod'].dt.year
record_db_financials['year_1'] = record_db_financials['year']+1
record_db_events=quandl.get_table('SHARADAR/EVENTS', ticker=tkr)
tmp_series = record_db_events['eventcodes'].str.contains('21')
record_db_events= record_db_events[tmp_series]
record_db_events['year'] = record_db_events.date.dt.year
record_db_events= record_db_events.drop(['date'],axis=1)
record_db_events_gp = record_db_events.groupby(['ticker','year'],as_index=False).count()
combined_pd = pd.merge(record_db_financials,record_db_events_gp,how ='left',left_on='year_1',right_on='year')
#convert all events to 1 and NaN
combined_pd.loc[combined_pd['eventcodes']>1,'eventcodes'] = 1
X = record_db_financials.iloc[:,6:-5]
Y = combined_pd.iloc[:,-1]
return combined_pd, X, Y
#tkr = 'AMZN'
#df_tmp = download_tkr(tkr)
#2b.Train tree
def train_tree(X,Y,ind):
print('Decision Tree')
#split the dataset into training set and testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.33, random_state=0)
min_leaf_size = int(len(X_train) * 0.01)
tree_clf = tree.DecisionTreeClassifier(min_samples_leaf=min_leaf_size)
#preprocessing the data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#fit the training data to the model
tree_clf.fit(X_train,Y_train)
##metric 1: roc
Y_score_tree = tree_clf.predict(X_test)
fpr, tpr, thresholds = roc_curve(Y_test,Y_score_tree, pos_label=1)
roc_auc = auc(fpr,tpr)
lw=2
plt.figure()
plt.plot(fpr,tpr,color='darkorange',lw=lw,label='ROC curve (area = %0.2f)' %roc_auc)
plt.plot([0,1],[0,1],color='navy',lw=lw,linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic - Decision Tree '+ind)
plt.legend(loc="lower right")
plt.savefig(ind+'_DT.png')
##metric 2: Confusion matrix
Y_pred_tree = tree_clf.predict(X_test)
confusion_matrix_tree = confusion_matrix(Y_test, Y_pred_tree)
print(confusion_matrix_tree)
print(classification_report(Y_test, Y_pred_tree))
#common standard to compare across models
f1_clf = f1_score(Y_test, Y_pred_tree, average='weighted')
##save model
f_tree = open(ind+'_tree_clf.pkl',"wb+")
pickle.dump(tree_clf, f_tree)
f_tree.close()
f_tree_sc = open(ind+'_tree_scaler.pkl',"wb+")
pickle.dump(scaler, f_tree_sc)
f_tree_sc.close()
return tree_clf,f1_clf
##2C Neural Network
#2Ci. Grid search that simulates the performance of different neural network designs
def grid_search(X_train,X_test, Y_train,Y_test,num_training_sample):
best_f1 = 0
best_hidden_layers_list = []
best_hidden_layers_tuple = ()
#various depth
for depth in range(1,5):
print('Depth = '+str(depth))
for layer_size in range(1,8):
neuron_cnt = 0
hidden_layers_list = []
i = 0
while i<depth:
hidden_layers_list.append(layer_size)
neuron_cnt += layer_size
i+=1
#pruning - skip designs with more neurons than training samples (to avoid over-fitting)
if num_training_sample<neuron_cnt:
break
hidden_layers_tuple = tuple(hidden_layers_list)
nn_clf = MLPClassifier(alpha=1e-5,
hidden_layer_sizes=hidden_layers_tuple, random_state=1)
nn_clf.fit(X_train,Y_train)
Y_pred = nn_clf.predict(X_test)
temp_f1 = f1_score(Y_test, Y_pred, average='weighted')
if temp_f1 > best_f1:
best_f1 = temp_f1
best_hidden_layers_list = hidden_layers_list
best_hidden_layers_tuple = hidden_layers_tuple
print(best_hidden_layers_list)
return best_hidden_layers_list,best_hidden_layers_tuple
#2Cii. Train Neural Network
def train_NN(X,Y,ind):
print('Neural Network')
#split the dataset into training set and testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.33, random_state=0)
#preprocessing the data
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
num_training_sample = len(X_train)
best_hidden_layers_list,best_hidden_layers_tuple = grid_search(X_train, X_test, Y_train, Y_test,num_training_sample)
nn_clf = MLPClassifier(alpha=1e-5,
hidden_layer_sizes=best_hidden_layers_tuple, random_state=1)
#fit the training data to the model
nn_clf.fit(X_train,Y_train)
##metric 1: roc
Y_score_nn = nn_clf.predict(X_test)
fpr, tpr, thresholds = roc_curve(Y_test,Y_score_nn, pos_label=1)
roc_auc = auc(fpr,tpr)
lw=2
plt.figure()
plt.plot(fpr,tpr,color='darkorange',lw=lw,label='ROC curve (area = %0.2f)' %roc_auc)
plt.plot([0,1],[0,1],color='navy',lw=lw,linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic - Neural Network '+ind)
plt.legend(loc="lower right")
#plt.show()
plt.savefig(ind+'_NN.png')
##metric 2: Confusion matrix
Y_pred_tree = nn_clf.predict(X_test)
confusion_matrix_tree = confusion_matrix(Y_test, Y_pred_tree)
print(confusion_matrix_tree)
print(classification_report(Y_test, Y_pred_tree))
#common standard to compare across models
#f1_clf = f1_score(Y_test, Y_score_nn, average='binary')
f1_clf = f1_score(Y_test, Y_score_nn, average='weighted')
##save model
f_nn = open(ind+'_nn_clf_.pkl',"wb+")
pickle.dump(nn_clf, f_nn)
f_nn.close()
f_nn_sc = open(ind+'_nn_scaler.pkl',"wb+")
pickle.dump(scaler, f_nn_sc)
f_nn_sc.close()
return nn_clf, f1_clf
'''*************************************
3. Execute the program
#3a. filter the industry in scope
'''
groupby_fld = 'sicsector'
min_size = 30
df_tkr = pd.read_csv('industry_tickers_list.csv')
dict_ind_tkr = {}
f1_list = []
df_tkr_ind = pd.DataFrame()
df_tkr_ind['cnt'] = df_tkr.groupby(groupby_fld)['ticker'].count()
df_tkr_ind_select = df_tkr_ind[df_tkr_ind['cnt']>=min_size]
list_scope = list(df_tkr_ind_select.index)
#collect ticker in each industry
for index, row in df_tkr.iterrows():
ind = row[groupby_fld]
tkr = row['ticker']
if ind in list_scope:
if ind in dict_ind_tkr:
dict_ind_tkr[ind].append(tkr)
else:
dict_ind_tkr[ind] = [tkr]
#loop through the dictionary - one industry at a time
for ind, list_tkr in dict_ind_tkr.items():
df_X = pd.DataFrame({})
df_Y = pd.DataFrame({})
print(ind)
#Go through the ticker list to download data from source
#loop through tickers from that industry
for tkr in list_tkr:
print(tkr)
try:
df_tmp,X_tmp,Y_tmp = download_tkr(tkr)
except Exception:
continue
if len(df_X)==0:
#df_all = df_tmp
df_X = X_tmp
df_Y = Y_tmp
else:
#df_all = pd.concat([df_all,df_tmp])
df_X = | pd.concat([df_X,X_tmp]) | pandas.concat |
import numpy as np
import pandas as pd
from powersimdata.input.input_data import InputData
from powersimdata.tests.mock_scenario import MockScenario
from postreise.analyze.transmission import congestion
mock_plant = {
"plant_id": ["A", "B", "C", "D"],
"bus_id": [1, 1, 2, 3],
}
mock_bus = {
"bus_id": [1, 2, 3, 4],
"Pd": [5, 6, 30, 1],
"zone_id": [1, 1, 1, 2],
}
grid_attrs = {"plant": mock_plant, "bus": mock_bus}
def _check_return(expected_return, surplus):
assert isinstance(surplus, pd.Series)
msg = "Time series indices don't match"
np.testing.assert_array_equal(
surplus.index.to_numpy(), expected_return.index.to_numpy(), msg
)
msg = "Values don't match expected"
np.testing.assert_array_equal(surplus.to_numpy(), expected_return.to_numpy(), msg)
def test_calculate_congestion_surplus_single_time(monkeypatch):
"""Congested case from Kirschen & Strbac Section 5.3.2.4"""
def mock_get_data(*args, **kwargs):
return demand
# Override default InputData.get_data method to avoid profile csv lookup
monkeypatch.setattr(InputData, "get_data", mock_get_data)
demand = pd.DataFrame({"UTC": ["t1"], 1: [410], 2: [0]})
lmp = pd.DataFrame({"UTC": ["t1"], 1: [7.5], 2: [11.25], 3: [10], 4: [0]})
pg = pd.DataFrame({"UTC": ["t1"], "A": [50], "B": [285], "C": [0], "D": [75]})
for df in (demand, lmp, pg):
df.set_index("UTC", inplace=True)
mock_scenario = MockScenario(grid_attrs, demand=demand, lmp=lmp, pg=pg)
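# Editorial sketch of where 787.5 comes from (my reading of the mock data,
# not part of the original test): zone-1 demand of 410 is spread over buses
# 1-3 in proportion to Pd [5, 6, 30] -> bus loads [50, 60, 300]; generation
# per bus is [335, 0, 75, 0]; congestion surplus = sum(lmp * (load - gen))
# = 7.5*(50-335) + 11.25*(60-0) + 10*(300-75) + 0*(0-0) = 787.5.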
expected_return = pd.Series(
data=[787.5],
index= | pd.date_range(start="2016-01-01", periods=1, freq="H") | pandas.date_range |
import pandas as pd
# just included for reference
# this can be improved by using loops, reading the dir for CSV files.
# read the created CSV files
df1 = pd.read_csv('1.csv')
print(df1.head())
print(len(df1))
df2 = pd.read_csv('2.csv')
print(df2.head())
print(len(df2))
df3 = pd.read_csv('3.csv')
print(df3.head())
print(len(df3))
print(f'total: {len(df1)+len(df2)+len(df3)}')
# generate a single combined CSV
df4 = pd.concat([df1, df2, df3])
print(df4.head())
combined_len = len(df4)
print(combined_len)
# drop duplicated entries if any
df4 = df4.drop_duplicates()
print(df4.head())
unique_len = len(df4)
print(unique_len)
print(f'unique% = {unique_len/combined_len*100}')
df4.to_csv('combined.csv', index=False)
# view the combined CSV with unique entries
df5 = | pd.read_csv('combined.csv') | pandas.read_csv |
import pandas as pd
import boto3
name = "-20210326"
metadata = "s3://ppmi-metadata/derived_tables/demog_ppmi_built_07042021.csv"
cst = True
cst = f's3://mjff-ppmi/volume_measures/direct_reg_seg_ppmi_volumes-mjff{name}-cst.csv'
dir_reg_seg = f's3://mjff-ppmi/volume_measures/direct_reg_seg_ppmi_volumes-mjff.csv'#{name}.csv'
randbasis = 's3://mjff-ppmi/superres-pipeline-mjff-randbasis/fullprojs.csv'
randbasis_df = | pd.read_csv(randbasis) | pandas.read_csv |
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Core-activation RDP layer"""
import copy
import typing as t
import numpy as np
import pandas as pd
import scipy.interpolate
import scipy.signal
from exot.exceptions import *
from exot.util.misc import (
get_cores_and_schedules,
get_valid_access_paths,
getitem,
is_scalar_numeric,
)
from .._base import Layer
from .._mixins import RDPmixins
"""
CoreActivation
--------------
The CoreActivation performs the following:
- [__Encode__]: takes a 1-d line-encoded stream, creates timestamps using a
`subsymbol_rate` provided during execution, and produces a DataFrame with
a `timestamp` as the 1st column, followed by columns with post-processed streams
for each unique combination of `core_count` and `schedule_tag` for all configured
environments/apps/zones.
The post-processing maps line-encoded symbols using a symbol -> core-specifier
mapping. The core-specifier is simply the symbol as an integer represented as a
binary number.
The `saturating` parameter is used to configure the mode in which the layer
operates: for example, in saturating mode, the value of '2' is interpreted as
"2 active", resulting in a core-specifier 0b11, which is equal to 3. The saturating
operation is (2^x - 1), which produces binary numbers with all 1's (except for input
of '0'). All produced values are in the range [0, 2^cores - 1].
In non-saturating mode the values are simply put through. Negative input values of are
transformed into the (maximum saturating value + 1 - value). Checks are performed to
make sure that all values lie in the valid range.
Preconditions:
- Input is a 1-d array
Postconditions:
- Output is a 2-d DataFrame with timestamps as 1st column,
- [__Decode__]: takes a DataFrame with timestamps and values, oversamples the values in
order to produce a 2-d array of specific width (at least 2 × the subsymbol count of the
following line-coding layer), and outputs the reshaped values. After decoding, the
corresponding timestamps array is available via the `decode_timestamps` property.
Preconditions:
- Input is a 2-d DataFrame
- Input DataFrame has exactly 2 columns (timestamps + values)
Postconditions:
- Output is a NumPy array
- Output is at least twice and less than 4 times the subsymbol count
"""
class CoreActivation(RDPmixins, Layer, layer=Layer.Type.PrePost):
def __init__(
self,
*,
sampling_period: float,
environments_apps_zones: t.Mapping,
saturating: bool = True,
interpolation: str = "linear",
**kwargs,
):
"""Initialise the CoreActivation RDP layer
Args:
sampling_period (float): the sink app's sampling period
environments_apps_zones (t.Mapping): the env->app->zone mapping
saturating (bool, optional): is configured as saturating?
"""
self.interpolation = interpolation
self.sampling_period = sampling_period
self.environments_apps_zones = environments_apps_zones
self.saturating = saturating
self.cores_and_schedules = get_cores_and_schedules(self.environments_apps_zones)
@property
def _encode_types(self) -> t.Tuple[type]:
return (np.ndarray,)
@property
def _decode_types(self) -> t.Tuple[type]:
return (pd.DataFrame,)
@property
def _encode_validators(self):
return {np.ndarray: [lambda v: v.ndim == 1]}
@property
def _encode_output_validators(self):
return {
pd.DataFrame: [
lambda v: v.ndim == 2,
lambda v: v.shape[1] == 1 + len(self.cores_and_schedules),
]
}
@property
def _decode_validators(self):
# Accepts: timestamps + single value
return {pd.DataFrame: [lambda v: v.ndim == 2, lambda v: v.shape[1] == 2]}
@property
def _decode_output_validators(self):
# Accepts: timestamps + single value
return {pd.DataFrame: [lambda v: v.ndim == 2, lambda v: v.shape[1] >= 2]}
@property
def requires_runtime_config(self) -> (bool, bool):
"""Does the layer's (encode, decode) require runtime configuration?"""
return (True, True)
@property
def required_config_keys(self):
"""The required config keys
Implements the `required_config_keys` from Configurable base class
"""
return ["symbol_rate", "subsymbol_rate"]
def validate(self) -> t.NoReturn:
"""Implementation of Configurable's `validate`"""
if not is_scalar_numeric(self.config.subsymbol_rate):
raise MisconfiguredError("subsymbol_rate must be a numeric value")
if not is_scalar_numeric(self.config.symbol_rate):
raise MisconfiguredError("symbol_rate must be a numeric value")
@property
def sampling_period(self):
"""Get the sampling period"""
return self._sampling_period
@sampling_period.setter
def sampling_period(self, value):
"""Set the sampling period"""
if not is_scalar_numeric(value):
raise LayerMisconfigured("sampling_period must be an integer of float")
self._sampling_period = value
@property
def saturating(self):
"""Is the layer operating in the saturating mode?"""
return self._saturating
@saturating.setter
def saturating(self, value):
"""Set the saturating property"""
if not isinstance(value, (bool, np.bool)):
raise LayerMisconfigured(f"'saturating' must be a boolean, got: {value}")
self._saturating = value
@property
def timing_interpolator(self) -> t.Optional[t.Callable]:
"""Get the timing interpolator
Returns:
t.Optional[t.Callable]: the interpolator function, if available
"""
return getattr(self, "_timing_interpolator", None)
@property
def values_interpolator(self) -> t.Optional[t.Callable]:
"""Get the values interpolator
Returns:
t.Optional[t.Callable]: the interpolator function, if available
"""
return getattr(self, "_values_interpolator", None)
@staticmethod
def _upper_limit(core_count: np.integer) -> np.integer:
"""Get the upper limit for valid core specifiers
Args:
core_count (np.integer): the core count
Returns:
np.integer: the upper limit
"""
assert isinstance(core_count, (int, np.integer)), "core_count must be an int"
return 2 ** core_count - 1
def _apply_mapping(self, stream: np.ndarray, core_count: int) -> np.ndarray:
"""Apply a core-specifier mapping to an input stream
Args:
stream (np.ndarray): the 1-d lnestream
core_count (int): the core count
Returns:
np.ndarray: a validated and optionally saturated stream
Raises:
ValueValidationFailed: if any of the values is not within [0, 2^core_count -1]
"""
# If operating in the 'saturating' mode
if self.saturating:
# If a negative value is encountered in the stream, replace it with the max,
# which is (core_count), and subtract (value + 1). For example, with a core
# count of 4, '-1' will yield '4', '-2' will yield '3', and so on.
if (stream < 0).any():
stream[stream < 0] = core_count - (stream[stream < 0] + 1)
stream = (2 ** stream - 1).astype(int)
_ = self._upper_limit(core_count)
if (stream < 0).any() or (stream > _).any():
raise ValueValidationFailed(
f"some values in the mapped stream for core_count of {core_count} were "
f"out of range [0, {_}]"
)
return stream
def _encode(self, lnestream: np.ndarray) -> pd.DataFrame:
"""Encode a lnestream for each core/schedule pair
Args:
lnestream (np.ndarray): the line-encoded stream from an LNE coder
Returns:
pd.DataFrame: a DataFrame with a timestamps and value columns named after tags
"""
tag_count = len(self.cores_and_schedules)
rdpstream = np.empty((lnestream.shape[0], tag_count), dtype=np.dtype("int"))
timestamps = np.full(lnestream.shape[0], 1 / self.config.subsymbol_rate)
tags = []
for idx, (core_count, tag) in enumerate(self.cores_and_schedules):
tags.append(tag)
rdpstream[:, idx] = self._apply_mapping(lnestream.copy(), core_count)
return pd.DataFrame.join(
pd.DataFrame(timestamps, columns=["timestamp"]),
| pd.DataFrame(rdpstream, columns=tags) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Time class.
"""
__all__ = ['Time']
class Time(object):
"""Class that defines the time of a unit or model
Examples::
>>> th = Time('2017-01-01', '2017-01-02', freq = '2H')
>>> th.dt
7200.0
.. _tab_tag_freq:
.. table:: Available freq tags in Pandas
====== ==============================================
Tag Description
====== ==============================================
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
BQ business quarter endfrequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
====== ==============================================
"""
def __init__(self, start=None, end=None, freq=None, *args, **kwds):
"""
:param args:
:param kwds:
:param start: starting value, datetime-like
:param end: end time, datetime-like
:param freq: string or pandas offset object
"""
super().__init__(*args, **kwds)
import pandas as pd
from numpy import linspace
from pandas.tseries.frequencies import to_offset
if freq is None:
freq = 'H'
self.freq = freq
if isinstance(start, str) and isinstance(end, str):
try:
self.start = pd.Timestamp(start)
self.end = | pd.Timestamp(end) | pandas.Timestamp |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but kept so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
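    # test_append covers appending a dict, the last row as a Series, the last row
    # as a plain list, and DataFrames (single and in a list), with and without
    # ignore_index and verify_integrity.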
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
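    # test_asfreq and test_asof only check for the UserWarning emitted when Modin
    # defaults to the pandas implementation.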
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
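    # at_time and between_time are only checked for the defaulting-to-pandas
    # UserWarning; bfill is compared directly against the pandas result.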
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
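    # combine, combine_first, compound, corr, corrwith and cov are exercised only
    # through pytest.warns(UserWarning), i.e. these tests check the fallback
    # warning rather than comparing results against pandas.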
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # pandas exhibits weird behavior for this case
        # Remove this case when we can pull the error messages from the backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
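    # test_describe compares describe() output for default and custom percentiles,
    # include/exclude filters, and an all-string frame, where only the
    # count/unique/freq rows are guaranteed to match.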
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We have to do this because we choose the highest count slightly differently
            # than pandas, and there is no true guarantee which value will come first.
            # If the results don't match, at least make sure that the `freq` row is the same.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
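    # test_drop covers label/axis-based drops, errors="ignore", non-unique row and
    # column labels, inplace drops, and a MultiIndex level drop that is only
    # checked for a UserWarning.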
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
        # equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
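    # The dropna tests below cover invalid arguments, how="any"/"all", inplace
    # operation, dropping along both axes at once, and row/column subsets.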
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused; it is created anyway so the parametrized data does
        # not need confusing list-comprehension workarounds in pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
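    # test_duplicated compares duplicated() for every keep option and for a
    # randomly chosen column subset.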
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
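    # The remaining fillna tests cover scalar and method-based fills on mixed
    # frames, downcasting, inplace behavior, fill limits, dict/Series/DataFrame
    # fill values, column-wise (axis=1) fills, and invalid method/value input.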
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
        # TODO: Use this when the Arrow issue is resolved:
        # (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
        # TODO: Use this when the Arrow issue is resolved:
        # (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
        TODO: Use this when the Arrow issue is resolved:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
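    # first() is only checked for the defaulting-to-pandas UserWarning, while
    # first_valid_index() is compared directly against pandas.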
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
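    # get_value and get_values are deprecated pandas APIs; the tests only check
    # that a UserWarning is raised when Modin falls back to pandas for them.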
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
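    # infer_objects is only checked for the fallback UserWarning; test_iloc
    # exercises scalar, Series and DataFrame positional indexing as well as item
    # assignment, and expects an IndexError for positional access on an empty frame.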
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
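# Illustrative note (plain pandas, not part of the test suite): DataFrame.insert
# mutates the frame in place and returns None -- which is why several of the
# comparisons above compare two None "results" -- and re-inserting an existing
# column name raises ValueError unless allow_duplicates=True is passed.
# >>> df = pandas.DataFrame({"a": [1, 2]})
# >>> df.insert(0, "b", df["a"])   # returns None; df columns are now ["b", "a"]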
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
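# Illustrative plain-pandas sketch of the MultiIndex lookups exercised above
# (toy data; not part of the test suite):
# >>> idx = pandas.MultiIndex.from_tuples([("bar", "one"), ("bar", "two")])
# >>> df = pandas.DataFrame({"col1": [1, 2]}, index=idx)
# >>> df.loc["bar"]           # partial key -> every row under "bar"
# >>> df.loc[("bar", "one")]  # full key tuple -> a single row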
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
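# Illustrative note (plain pandas, not part of the test suite): df.pipe(f, arg)
# is simply f(df, arg), so the chained pipes above are equivalent to
# f(g(h(df), arg1=a), arg2=b, arg3=c).
# >>> pandas.DataFrame({"x": [1, 2]}).pipe(lambda d, n: d * n, 3)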
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
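# Illustrative note (plain pandas, not part of the test suite): query() filters
# rows with a boolean expression over column names, and "@name" pulls in a local
# Python variable -- the feature the NotImplementedError check above guards.
# >>> x = 2
# >>> pandas.DataFrame({"col1": [1, 3]}).query("col1 < @x")   # keeps the row with 1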
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
| tm.assert_index_equal(renamed.columns, modin_renamed.columns) | pandas.util.testing.assert_index_equal |
import re
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from rdkit.Chem import MolFromSmiles, Draw, rdFMCS
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem.ChemUtils.AlignDepict import AlignDepict
from rdkit.DataStructs import DiceSimilarity
from rdkit.Chem import AllChem
import molvs
input_path = pathlib.Path(__file__).resolve().parent / 'input'
tautomer_canonicalizer = molvs.tautomer.TautomerCanonicalizer(max_tautomers=20)
fragments = molvs.fragment.REMOVE_FRAGMENTS + (
molvs.fragment.FragmentPattern('tartrate', 'O=C(O)C(O)C(O)C(=O)O'),
)
fragment_remover = molvs.fragment.FragmentRemover(fragments)
def mol_to_smiles(mol):
return AllChem.MolToSmiles(mol, isomericSmiles=True)
def get_fingerprint(mol):
return AllChem.GetMorganFingerprint(mol, 2)
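# Hedged sketch (added for illustration; not part of the original script): the
# typical way the objects above are combined -- strip salt/solvent fragments,
# canonicalise the tautomer, then compare Morgan fingerprints. Any SMILES passed
# in are the caller's assumption.
def standardize_and_compare(smiles_a, smiles_b):
    """Return the Dice similarity of two standardised structures."""
    mols = []
    for smiles in (smiles_a, smiles_b):
        mol = MolFromSmiles(smiles)
        mol = fragment_remover.remove(mol)
        mol = tautomer_canonicalizer.canonicalize(mol)
        mols.append(mol)
    return DiceSimilarity(get_fingerprint(mols[0]), get_fingerprint(mols[1]))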
old_data = | pd.read_csv(input_path / 'lsp_compounds_20180503.csv') | pandas.read_csv |
'''
PartsGenie (c) University of Liverpool 2020
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=wrong-import-order
import os.path
from liv_utils import codon_utils, ncbi_tax_utils
import pandas as pd
def get_organisms(parent_id):
'''Get all valid organisms (bacterial with codon usage tables).'''
curr_dir = os.path.dirname(os.path.realpath(__file__))
filepath = os.path.join(curr_dir, '%s.csv' % parent_id)
if not os.path.exists(filepath):
organisms = codon_utils.get_codon_usage_organisms(expand=True)
ids = ncbi_tax_utils.TaxonomyFactory().get_child_ids(parent_id)
valid_ids = set(organisms.values()).intersection(set(ids))
valid_organisms = {name: tax_id for name, tax_id in organisms.items()
if tax_id in valid_ids}
_write(valid_organisms, filepath)
return valid_organisms
return _read(filepath)
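# Example usage (hedged; '2' -- the NCBI taxonomy id for Bacteria -- is an
# assumed argument, and the first call is slow because it has to build and
# cache the CSV):
# >>> organisms = get_organisms('2')
# >>> len(organisms)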
def _write(organisms, filepath):
'''Write organisms.'''
df = pd.Series(organisms, name='id')
df.index.name = 'name'
df.to_csv(filepath)
def _read(filepath):
'''Read organisms.'''
df = | pd.read_csv(filepath, dtype=str) | pandas.read_csv |
from ..utils import constants, utils
import numpy as np
import pandas as pd
from collections import Counter
import xlsxwriter
"""
Create a heatmap that depicts the temporal pattern of any dataset that includes a datetime
column. The heatmap does not include a spatial dimension. The module was created for HVI
targeting, so when using it for that purpose you should filter the input dataset spatially
before running it. See the readme file for more information. There are two functions: one
that creates a heatmap by month of year and one that creates a heatmap by week of year.
------------------------
Example
from RADGIS.preprocessing import pol_heatmap
pol_heatmap.heatmap(dataframe, temporalBinType, save_location, timestamp_column=constants.DATETIME)
"""
# Helper functions
# Fills in missing days.
def _addDays(df):
all_days = pd.date_range(df.just_date.min(), df.just_date.max(), freq="D")
po = all_days
po = po.map(lambda t: t.strftime('%Y-%m-%d'))
all_days = set(po)
dfDaySet = set(df.just_date.tolist())
difference = set(map(str,all_days)) - dfDaySet
if len(difference) >= 1:
counter = -1
for d in difference:
# Add this as the default. It's a placeholder and will be updated later outside of this function.
df.loc[counter] = [0,"01", "Monday", d]
counter -= 1
else:
pass
# Fills in missing hours.
def _add_Hours(x):
fullHourSet = set(range(0, 23+1))
hourSet = set(x.HourOfDay)
difference = fullHourSet - hourSet
return list(difference)
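# Hedged, self-contained illustration (not part of the original module): for a
# toy group with hours 0, 1 and 23 present, _add_Hours returns the missing
# hours 2..22.
def _example_missing_hours():
    toy = pd.DataFrame({"HourOfDay": [0, 1, 23]})
    return sorted(_add_Hours(toy))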
# Main function that formats the data.
def heatmap(dataframe, temporalBinType, save_location, timestamp_column=constants.DATETIME):
'''
Save an xlsx file to disk.
:param dataframe: pandas DataFrame or TrajDataFrame
DataFrame or TrajDataFrame to be plotted
:param temporalBinType: determines the temporal extent of the heatmap. Options are:
"MonthOfYear"
"WeekOfYear"
:param save_location: path and file name with extension (e.g., save_location=r"D:\Projects\20191211_TemporalChart_POL\file_name.xlsx")
:param timestamp_column: DataFrame or TrajDataFrame column that contains the datetime information.
Default is constants.DATETIME, which applies if TrajDataFrame and the original tdf datetime column is used.
'''
df = dataframe.copy()
# Add the columns and formatting that will be used to create the heatmap.
# A conditional statement based on the user's temporalBinType input.
if temporalBinType == "MonthOfYear":
df[temporalBinType] = df[timestamp_column].dt.year.astype(str) + '-' + df[timestamp_column].dt.month.astype(str).apply(lambda x: "0" + x if len(x) == 1 else x)
elif temporalBinType == "WeekOfYear":
df[temporalBinType] = df[timestamp_column].dt.year.astype(str) + '-' + df[timestamp_column].dt.strftime('%U').apply(lambda x: "0" + x if len(x) == 1 else x)
# Create a column for the day of week name and for the hour of day. Add a leading 0 to the hour of day if it's a single digit.
df["DayOfWeek"] = df[timestamp_column].dt.day_name()
df['HourOfDay'] = df[timestamp_column].dt.hour.astype(str).apply(lambda x: "0" + x if len(x) == 1 else x)
# Count the events by binning them into the selected temporalBinType, day of week name, and hour of day.
df["TotalHrByYearMonthDayNameHour"] = df.groupby([temporalBinType, "DayOfWeek", "HourOfDay"])[timestamp_column].transform(lambda x: len(x.dt.date.unique()))
# Add the date column, which will be used to fill in the missing days using the _addDays helper function.
df['just_date'] = df[timestamp_column].dt.date
df["just_date"] = df.just_date.map(lambda t: t.strftime('%Y-%m-%d'))
# Only keep the required columns. This is cleaner and decreases the size of the dataset that is processed.
df = df.filter(items=["TotalHrByYearMonthDayNameHour", "HourOfDay", "DayOfWeek", "just_date"])
# Fill in missing days that are within the min/max range.
_addDays(df)
df.reset_index(drop=True,inplace=True)
# Redo these two columns now that the missing dates have been filled in since there could be new rows
# that represent newly added days with the placeholder values inserted from the _addDays function. Also, the just_date
# column is redone because it needs to be a pandas timestamp.
df["just_date"] = pd.to_datetime(df.just_date)
df["DayOfWeek"] = df.just_date.dt.day_name()
# Redo this column because we did not include it when we filtered the dataframe. It was not included
# because it had to be redone since we added missing days.
if temporalBinType == "MonthOfYear":
df[temporalBinType] = df['just_date'].dt.year.astype(str) + '-' + df['just_date'].dt.month.astype(str).apply(lambda x: "0" + x if len(x) == 1 else x)
elif temporalBinType == "WeekOfYear":
df[temporalBinType] = df['just_date'].dt.year.astype(str) + '-' + df['just_date'].dt.strftime('%U').apply(lambda x: "0" + x if len(x) == 1 else x)
df["HourOfDay"] = df["HourOfDay"].astype(int)
# Create a groupby object that is passed to the _add_Hours helper function that fills in missing hours.
day_Group = df.groupby(["just_date"], sort=False)
# Fill in missing hours.
results = day_Group.apply(_add_Hours)
results = pd.DataFrame(results)
# This takes the index, which is the date, and makes it into a column.
results["just_date"]=results.index.get_level_values("just_date")
# Rename the column that is comprised of the missing hours. Now called HourOfDay.
results.rename(columns={0:"HourOfDay"}, inplace=True)
# Explode each row so that the missing hours, which are stored as a list per date, are stacked vertically under each date.
sxy = results.apply(lambda x: | pd.Series(x["HourOfDay"]) | pandas.Series |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import collections
import copy
# deadspots = 175.2 (full fov) / res / 2 * discretisation
# eg. 115x44 disc. 5 -> 175.2/115/2*5 = 3.81 deg
# 2nd July (93a92648a5e774c97e3368e3306782382f626b6d) - SR=1, rho=0.1, theta=5 deg
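# Hedged helper (added for illustration, not in the original script): the
# dead-spot formula from the comment above, assuming a 175.2 deg full FOV.
# deadspot_deg(115, 5) -> ~3.81 and deadspot_deg(115, 10) -> ~7.62, matching
# the dictionary keys below.
def deadspot_deg(resolution, discretisation, fov_deg=175.2):
    return fov_deg / resolution / 2 * discretisation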
data = {
'115x44':{
'3.81': [1, 1, 1, 0, 1, 1, 1, 1, 1, 1],
'7.62': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'11.43': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
'23x9':{
'3.81': [0, 1, 1, 1, 1, 0, 1, 1, 1, 1],
'7.62': [0, 0, 1, 0, 1, 1, 0, 0, 0, 1],
'11.43': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
},
}
mean = {}
stdev = np.zeros((len(data.keys()),max([len(v) for k,v in data.items()])))
for i, (k,v) in enumerate(data.items()):
mean[k] = {}
for j, (k2, v2) in enumerate(v.items()):
mean[k][k2] = np.mean(v2)
stdev[i,j] = mean[k][k2] * (1-mean[k][k2]) / (len(v2)-1)
df = | pd.DataFrame(mean) | pandas.DataFrame |
from sklearn.metrics import classification_report, f1_score, confusion_matrix
import editdistance
import pandas as pd
import numpy as np
import os.path
import sys
from reach.pdf_parser.pdf_parse import parse_pdf_document, grab_section
def pretty_confusion_matrix(actual_data, predict_data, labels):
cm = confusion_matrix(actual_data, predict_data, labels = labels)
pretty_conf = pd.DataFrame(
cm,
columns=["Predicted {}".format(label) for label in labels],
index=["Actually {}".format(label) for label in labels]
)
return pretty_conf
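# Hedged demo (illustrative labels only; not part of the original evaluation):
def _example_confusion_matrix():
    actual = ["found", "missing", "found"]
    predicted = ["found", "found", "found"]
    return pretty_confusion_matrix(actual, predicted, labels=["found", "missing"])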
def evaluate_metric_scraped(actual, predicted, sections, files, providers):
"""
Input:
actual : a boolean list of whether section text was in the pdf
predicted : a boolean list of whether section text was scraped
sections : a list of the section names for each actual/predicted pair
files : a list of the pdf names
providers : a list of the providers where each pdf came from
Output:
Various metrics for how accurately the scraper scraped a
section or not, no comment on how good the scrape was though
"""
similarity = round(f1_score(actual, predicted, average='micro'), 3)
combined_data = pd.DataFrame({
'Actual' : actual,
'Predicted' : predicted,
'Section' : sections,
'Files' : files,
'Provider' : providers
})
all_providers = list(set(providers))
get_num_pdfs = lambda x: len(x["Files"].unique())
get_num_pdfs_text = lambda x: len(x[x["Actual"]==True]["Files"].unique())
get_prop_text = lambda x: round(sum(x["Actual"]==True) / len(x), 3)
get_f1 = lambda x: round(
f1_score(
list(x["Actual"]),
list(x["Predicted"]),
average='micro'
),
3
)
grouped_provider = combined_data.groupby("Provider")
n_by_prov = grouped_provider.apply(get_num_pdfs)
n_text_by_prov = grouped_provider.apply(get_num_pdfs_text)
prop_text_by_prov = round(n_text_by_prov/n_by_prov, 3)
f1_by_prov = grouped_provider.apply(get_f1)
grouped_provider_section = combined_data.groupby(["Provider","Section"])
n_text_by_sect = grouped_provider_section.apply(get_num_pdfs_text)
prop_text_by_sect = grouped_provider_section.apply(get_prop_text)
f1_by_sect = grouped_provider_section.apply(get_f1)
metrics_by_prov = pd.concat(
[n_by_prov, n_text_by_prov, prop_text_by_prov, f1_by_prov],
axis = 1
)
metrics_by_prov.columns = [
"Number of pdfs included",
"Number of pdfs with sections text",
"Proportion of pdfs with sections text",
"Lenient F1 score for all sections included"
]
trans_n_text_by_sect = pd.DataFrame(
[n_text_by_sect[provider] for provider in all_providers],
index = all_providers
)
trans_n_text_by_sect.columns = [
'Number of pdfs with a {} section'.format(b)\
for b in trans_n_text_by_sect.columns
]
trans_prop_text_by_sect = pd.DataFrame(
[prop_text_by_sect[provider] for provider in all_providers],
index = all_providers
)
trans_prop_text_by_sect.columns = [
'Proportion with a {} section'.format(b)\
for b in trans_prop_text_by_sect.columns
]
trans_f1_by_sect = pd.DataFrame(
[f1_by_sect[provider] for provider in all_providers],
index = all_providers
)
trans_f1_by_sect.columns = [
'Lenient F1 score for the {} section'.format(b)\
for b in trans_f1_by_sect.columns
]
provider_metrics = pd.concat(
[metrics_by_prov, trans_n_text_by_sect,
trans_prop_text_by_sect, trans_f1_by_sect],
axis = 1,
sort=True
)
provider_metrics.index.name = 'Provider'
provider_metrics.reset_index(inplace=True)
n = len(set(files))
n_text = len(
set([f for (f,a) in zip(files, actual) if a])
)
all_provider_metrics = {
'Provider': 'all',
'Number of pdfs included': n,
'Number of pdfs with sections text': n_text,
'Proportion of pdfs with sections text': round(n_text/n, 3),
'Lenient F1 score for all sections included': round(
f1_score(
list(combined_data["Actual"]),
list(combined_data["Predicted"]),
average='micro'
), 3)
}
sections_texts = pd.DataFrame(
{'Section': sections, 'Actual': actual, 'Predicted': predicted}
)
for section_name in set(sections):
section_text = sections_texts[sections_texts['Section']==section_name]
actual_section = section_text['Actual']
predicted_section = section_text['Predicted']
all_provider_metrics[
'Number of pdfs with a {} section'.format(section_name)
] = len(set(
[file for i,file in enumerate(files) if
((sections[i] == section_name) and (actual[i]))]
))
all_provider_metrics[
'Lenient F1 score for the {} section'.format(section_name)
] = f1_score(actual_section, predicted_section, average='micro')
provider_metrics = provider_metrics.append(
all_provider_metrics,
ignore_index=True
)
provider_metrics = (provider_metrics.set_index('Provider').T)
metrics = {
'Lenient F1-score (references section exists or not)' : similarity,
'Metrics by provider' : provider_metrics,
}
return metrics
def levenshtein_distance(actual_text, predicted_text, normalised=True):
"""
Calculate levenshtein distance given an actual and predicted text
"""
distance = editdistance.eval(actual_text, predicted_text)
if normalised:
distance = distance / max(len(actual_text), len(predicted_text))
return distance
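# Hedged check (illustrative strings only): editdistance.eval("kitten", "sitting")
# is 3, so the normalised distance is 3 / 7 ~= 0.43; identical texts give 0.0.
def _example_levenshtein():
    return (levenshtein_distance("kitten", "sitting"),
            levenshtein_distance("kitten", "sitting", normalised=False))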
def evaluate_metric_quality(scrape_data, levenshtein_threshold):
"""
Normalised Levenshtein distances between actual and predicted section text
for pdfs where there is a section (actual!='')
"""
# Get rid of times when there is no section
scrape_data = list(filter(lambda x: x['Actual text'] != '', scrape_data))
hashes = [s['File'] for s in scrape_data]
actual_texts = [s['Actual text'] for s in scrape_data]
predicted_texts = [s['Predicted text'] for s in scrape_data]
sections = [s['Section'] for s in scrape_data]
# Get all the normalised Lev distances
lev_distances_hash = dict()
for hash, actual_text, predicted_text in zip (hashes, actual_texts, predicted_texts):
lev_distances_hash[hash] = levenshtein_distance(actual_text, predicted_text)
lev_distances = list(lev_distances_hash.values())
# Which sections were found exactly?
equal = [lev_distance == 0 for lev_distance in lev_distances]
# Which sections were found roughly the same?
quite_equal = [
lev_distance<levenshtein_threshold for lev_distance in lev_distances
]
metrics = {
'Number of pdfs with sections text' : len(scrape_data),
'Mean normalised Levenshtein distance' : np.mean(lev_distances),
'Strict accuracy (micro)' : np.mean(equal),
'Lenient accuracy (micro) (normalised Levenshtein < {})'.format(
levenshtein_threshold
) : np.mean(quite_equal)}
for section_name in set(sections):
# Get the Levenshtein distances for this sections actual-predicted pairs
lev_distances_section = [
lev_distance for (section,lev_distance) \
in zip(sections, lev_distances) \
if section == section_name
]
equal_section = [l==0 for l in lev_distances_section]
quite_equal_section = [
l<levenshtein_threshold for l in lev_distances_section
]
strict_acc_section = np.mean(equal_section)
lenient_acc_section = np.mean(quite_equal_section)
metrics[
'Mean normalised Levenshtein distance for the {} section'.format(
section_name
)
] = np.mean(lev_distances_section)
metrics[
'Strict accuracy for the {} section'.format(section_name)
] = strict_acc_section
metrics[
'Lenient accuracy (normalised Levenshtein'+
'< {}) for the {} section'.format(
levenshtein_threshold, section_name
)
] = lenient_acc_section
return {k:round(v,3) for k,v in metrics.items()}, lev_distances_hash
def scrape_process_pdf(
section_names, pdf_name, scrape_pdf_location, actual_texts
):
"""
Input:
section_names : the list of sections we are looking for in the pdf
pdf_name : the name of the pdf
scrape_pdf_location : the file location of the pdf
Output:
scrape_data : a list of dicts with the predicted and actual texts for
each of the sections we looked for in the pdf
"""
if os.path.splitext(pdf_name)[1] == ".pdf":
pdf_name = os.path.splitext(pdf_name)[0]
with open('{}/{}.pdf'.format(scrape_pdf_location, pdf_name), 'r') as f:
pdf_file, full_text, _ = parse_pdf_document(f)
scrape_data = []
for section_name in section_names:
scrape_data.append({
'File' : pdf_name,
'Section' : section_name,
'Predicted text' : grab_section(pdf_file, section_name),
'Actual text' : actual_texts[section_name]})
return scrape_data
def evaluate_find_section(
evaluate_find_section_data, provider_names,
scrape_pdf_location, levenshtein_threshold,
csv=False
):
# Get the predicted text for each of the pdf sections for each pdf
section_names = evaluate_find_section_data[
next(iter(evaluate_find_section_data))
].keys()
scrape_data = []
for pdf_name, actual_texts in evaluate_find_section_data.items():
scrape_data.extend(
scrape_process_pdf(
section_names, pdf_name, scrape_pdf_location, actual_texts
)
)
eval1_scores = evaluate_metric_scraped(
[pred_section['Actual text']!='' for pred_section in scrape_data],
[pred_section['Predicted text']!='' for pred_section in scrape_data],
[pred_section['Section'] for pred_section in scrape_data],
[pred_section['File'] for pred_section in scrape_data],
[provider_names[pred_section['File']] for pred_section in scrape_data]
)
eval2_scores, raw_levenshtein_distance = evaluate_metric_quality(
scrape_data, levenshtein_threshold)
eval_scores_find = {
"Score" : 1 - eval2_scores['Mean normalised Levenshtein distance']
}
eval_scores_find.update(eval1_scores)
eval_scores_find.update(eval2_scores)
# Write out the full evaluation data to csv file
if csv:
df = | pd.DataFrame(scrape_data) | pandas.DataFrame |
# import libraries
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import time
import random
import re
import os
# Important Note ---
# change the years for which you want to scrape the data; defaults to 2008-2019
year_list = [year for year in range(2019, 2007, -1)]
# project paths
project_root_dir = os.path.normpath(os.getcwd() + os.sep + os.pardir)
file_path = os.path.join(project_root_dir, "data")
os.makedirs(file_path, exist_ok=True)
# function for loading data
def load_data(filename, file_path=file_path):
csv_path = os.path.join(file_path, filename)
return pd.read_csv(csv_path)
# function for saving data as csv file
def save_dataframe(df, filename, file_path=file_path):
"""
This function takes a dataframe and save it as a csv file.
df: dataframe to save
filename: Name to use for the csv file eg: 'my_file.csv'
file_path: where to save the file
"""
path = os.path.join(file_path, filename)
df.to_csv(path, index=False)
def get_batting_data(year):
"""This function gets the data from ipl official website,
extract all the table data and return it as a pandas dataframe.
"""
try:
# get the html from the website
url = "https://www.iplt20.com/stats/{}/most-runs".format(year)
response = requests.get(url)
batting_html = response.text
# parse the html
batting_soup = bs(batting_html, features="lxml")
# get the table data
batting_table_data = batting_soup.find(class_="js-table")
# get the column names
col_names = []
for header in batting_table_data.find_all("th"):
col_names.append(header.text.strip())
# create the dataframe
a_list = []
for data in batting_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 14
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# Add the nationality of each player in the dataframe
nationality_list = []
for index, data in enumerate(batting_table_data.find_all("tr")[1:]):
try:
nationality_list.append(data["data-nationality"])
except Exception as e:
print(e)
print(index)
# add none
nationality_list.append(None)
df["Nationality"] = nationality_list
# Add the player link for more info in the dataframe
base_url = "https://www.iplt20.com"
player_link_list = []
try:
# get all the links and add it to the list
for data in batting_table_data.find_all("a"):
player_link_list.append(base_url + data["href"])
# create a column with None value
df[15] = None
# iterate through each row and create a player name pattern
for index, row in df.iterrows():
player_name = row["PLAYER"].replace(" ", "-")
player_regex = re.compile(r"{}".format(player_name), re.IGNORECASE)
for item in player_link_list:
# if the pattern matches any links
if player_regex.search(item) != None:
# then append it to that row of the df
df.iloc[index, 15] = item
# rename the column
df.rename(columns={15: "Player Link"}, inplace=True)
# extract the player team name from the link and add to the df
team_regex = r"teams/(\w+-\w+-?\w+)"
df["Team"] = df["Player Link"].str.extract(team_regex, flags=re.IGNORECASE)
df["Team"] = df["Team"].apply(lambda x: str(x).title().replace("-", " "))
# convert data types from string to numeric
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["Mat"] = pd.to_numeric(df["Mat"], errors="coerce").fillna(0)
df["Inns"] = pd.to_numeric(df["Inns"], errors="coerce").fillna(0)
df["NO"] = pd.to_numeric(df["NO"], errors="coerce").fillna(0)
df["Runs"] = pd.to_numeric(df["Runs"], errors="coerce").fillna(0)
df["HS"] = pd.to_numeric(
df["HS"].str.replace("*", ""), errors="coerce"
).fillna(0)
df["Avg"] = pd.to_numeric(df["Avg"], errors="coerce").fillna(0)
df["BF"] = pd.to_numeric(df["BF"], errors="coerce").fillna(0)
df["SR"] = pd.to_numeric(df["SR"], errors="coerce").fillna(0)
df["100"] = pd.to_numeric(df["100"], errors="coerce").fillna(0)
df["50"] = pd.to_numeric(df["50"], errors="coerce").fillna(0)
df["4s"] = pd.to_numeric(df["4s"], errors="coerce").fillna(0)
df["6s"] = pd.to_numeric(df["6s"], errors="coerce").fillna(0)
# Add season year
df["Season"] = year
except Exception as e:
print(e)
print(year)
except Exception as e:
print(e)
print(year)
# return the dataframe
return df
def combine_all_years_data(function, year_list):
"""
Common function for combining data for all the years for a
given table from ipl website or any other. All table have
different functions to get the data from the websites.
"""
try:
# create an empty list to hold all the dataframes
df_list = []
# loop through each year and extract the data
for year in year_list:
# call the function to get the data for that year
df = function(year)
# append the data to the df list
df_list.append(df)
# add some random pause
time.sleep(1 + 2 * random.random())
# concat all the dataframes
df = pd.concat(df_list, ignore_index=True)
except Exception as e:
print(e)
print(year)
# return the dataframe
return df
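# Example usage (hedged: this issues live requests to iplt20.com for every
# season in year_list, so it is slow and may be rate limited):
# >>> batting_df = combine_all_years_data(get_batting_data, year_list)
# >>> save_dataframe(batting_df, "batting_all_years.csv")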
def get_points_table_data(year):
"""This Function takes the year value and extract the points table data
from HowStat and return it as a Pandas Dataframe.
"""
try:
url = "http://www.howstat.com/cricket/Statistics/IPL/PointsTable.asp?s={}".format(
year
)
response = requests.get(url)
except Exception as e:
print(e)
print(year)
try:
# get the html text
points_html_str = response.text
# parse it using BeautifulSoup
points_soup = bs(points_html_str, features="lxml")
# Get all the Table data
table_data = points_soup.find(class_="TableLined")
# create an empty list
a_list = []
# loop through all the table data and extract the desired value and append
# it to the empty list
for data in table_data.find_all("td"):
a_list.append(data.text.strip())
# total item to put in a list as we have 10 columns
n = 10
# create a list of list each contains 10 items
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
# create a dataframe from the list of list
df = pd.DataFrame(final)
# set the column names which is in the 0th index
df.columns = df.iloc[0]
# drop the column names from the 0th index
df = df.drop(df.index[0])
# convert the data types of all the following columns
col_to_convert = ["Mat", "Won", "Lost", "Tied", "N/R", "Points", "Net R/R"]
# function for converting string to numerical values
def convert_to_float(val):
return float(val)
# do the conversion for each column
for col in col_to_convert:
df[col] = df[col].apply(convert_to_float)
# add season year
df["Season"] = year
except Exception as e:
print(e)
print("year:", year)
print("Status Code:", response.status_code)
# return the dataframe
return df
def get_series_matches_data(year):
"""This function takes the year value and returns the series match
data.
"""
try:
url = "http://howstat.com/cricket/Statistics/IPL/SeriesMatches.asp?s={}".format(
year
)
response = requests.get(url)
except Exception as e:
print(e)
print(year)
try:
# get the html text
series_match_html = response.text
# parse the html text
series_soup = bs(series_match_html, features="lxml")
# get the table data
series_table_data = series_soup.find(class_="TableLined")
# an empty list and append all the data to it
a_list = []
for data in series_table_data.find_all("td"):
a_list.append(data.text.strip())
n = 4
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = df.iloc[0]
df = df.drop(df.index[0])
# convert to datetime object
df["Date"] = pd.to_datetime(df["Date"])
# split the match number and teams names
df[["Match Number", "Teams"]] = df["Match"].str.split(":", expand=True)
# get the team A and team B names
df[["Team A", "Team B"]] = df["Teams"].str.split("v", expand=True)
# matching pattern for team names
team_regex = r"""
(Rajasthan\sRoyals|Kings\sXI\sPunjab|Chennai\sSuper\sKings|Delhi\sCapitals|Mumbai\sIndians|
Kolkata\sKnight\sRiders|Royal\sChallengers\sBangalore|Deccan\sChargers|Kochi\sTuskers\sKerala|
Pune\sWarriors|Sunrisers\sHyderabad|Gujarat\sLions|Rising\sPune\sSupergiant|No\sresult|Match\sabandoned)
"""
# Extract the data
df["winner"] = df["Result"].str.extract(
team_regex, flags=re.VERBOSE | re.IGNORECASE
)
df["Wins By Runs"] = (
df["Result"]
.str.extract(r"(\d{1,3})\s(Runs|Run)", flags=re.IGNORECASE)
.fillna(0)
.iloc[:, 0]
)
df["Wins By Wickets"] = (
df["Result"]
.str.extract(r"(\d{1,2})\s(Wickets|Wicket)", flags=re.IGNORECASE)
.fillna(0)
.iloc[:, 0]
)
df["Season"] = df["Date"].dt.year
# columns to drop
cols_to_drop = ["Match", "Teams", "Result"]
df = df.drop(cols_to_drop, axis=1)
# convert strings to int
df["Wins By Runs"] = df["Wins By Runs"].astype("int")
df["Wins By Wickets"] = df["Wins By Wickets"].astype("int")
except Exception as e:
print(e)
print(year)
print(response.status_code)
# return the dataframe
return df
def get_fastest_fifties_data(year):
"""
Get the fastest fifties data.
"""
try:
url = "https://www.iplt20.com/stats/{}/fastest-fifties".format(year)
response = requests.get(url)
fifties_html = response.text
fifties_soup = bs(fifties_html, features="lxml")
# get the table data
fifties_table_data = fifties_soup.find(class_="js-table")
# get the column names
col_names = []
for header in fifties_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in fifties_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 9
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# convert to datetime object
df["Match Date"] = pd.to_datetime(df["Match Date"])
# convert data types
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["BF"] = pd.to_numeric(df["BF"], errors="coerce").fillna(0)
df["6s"] = pd.to_numeric(df["6s"], errors="coerce").fillna(0)
df["4s"] = pd.to_numeric(df["4s"], errors="coerce").fillna(0)
df["Runs"] = pd.to_numeric(df["Runs"], errors="coerce").fillna(0)
# Add season year
df["Season"] = year
except Exception as e:
print(e)
print(year)
return df
def get_fastest_centuries_data(year):
"""
Extract fastest centuries data for this year.
"""
try:
url = "https://www.iplt20.com/stats/{}/fastest-centuries".format(year)
response = requests.get(url)
centuries_html = response.text
centuries_soup = bs(centuries_html, features="lxml")
# get the table data
centuries_table_data = centuries_soup.find(class_="js-table")
# get the column names
col_names = []
for header in centuries_table_data.find_all("th"):
col_names.append(header.text.strip())
a_list = []
for data in centuries_table_data.find_all("td"):
a_list.append(" ".join(data.text.split()))
n = 9
final = [a_list[i : i + n] for i in range(0, len(a_list), n)]
df = pd.DataFrame(final)
df.columns = col_names
# convert to datetime object
df["Match Date"] = pd.to_datetime(df["Match Date"])
# convert data from string to numeric
df["POS"] = pd.to_numeric(df["POS"], errors="coerce").fillna(0)
df["BF"] = pd.to_numeric(df["BF"], errors="coerce").fillna(0)
df["6s"] = pd.to_numeric(df["6s"], errors="coerce").fillna(0)
df["4s"] = pd.to_numeric(df["4s"], errors="coerce").fillna(0)
df["Runs"] = | pd.to_numeric(df["Runs"], errors="coerce") | pandas.to_numeric |
# https://www.kaggle.com/dlarionov/feature-engineering-xgboost/
import time
import pickle
import gc
import sys
from xgboost import plot_importance
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from itertools import product
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 100)
# %matplotlib inline
def plot_features(booster, figsize):
fig, ax = plt.subplots(1, 1, figsize=figsize)
return plot_importance(booster=booster, ax=ax)
print(sys.version_info)
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
cats = pd.read_csv('../input/item_categories.csv')
train = pd.read_csv('../input/sales_train.csv')
# set index to ID to avoid dropping it later
test = pd.read_csv('../input/test.csv').set_index('ID')
# Plot Outliers
plt.figure(figsize=(10, 4))
plt.xlim(-100, 3000)
sns.boxplot(x=train.item_cnt_day)
plt.figure(figsize=(10, 4))
plt.xlim(train.item_price.min(), train.item_price.max()*1.1)
sns.boxplot(x=train.item_price)
## Remove Outliers
train = train[train.item_price < 100000]
train = train[train.item_cnt_day < 1001]
# There is one item with price below zero. Fill it with median.
median = train[(train.shop_id == 32) & (train.item_id == 2973) & (
train.date_block_num == 4) & (train.item_price > 0)].item_price.median()
train.loc[train.item_price < 0, 'item_price'] = median
# Several shops are duplicates of each other (according to their names). Fix train and test set.
# Якутск Орджоникидзе, 56
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
# Якутск ТЦ "Центральный"
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
# Жуковский ул. Чкалова 39м²
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11
# Shops/Cats/Items preprocessing
# Each shop_name starts with the city name.
# Each category contains type and subtype in its name.
shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"',
'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['city'] = shops['shop_name'].str.split(' ').map(lambda x: x[0])
shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск'
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops = shops[['shop_id', 'city_code']]
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
# if subtype is nan then type
cats['subtype'] = cats['split'].map(
lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id', 'type_code', 'subtype_code']]
items.drop(['item_name'], axis=1, inplace=True)
# Monthly sales
len(list(set(test.item_id) - set(test.item_id).intersection(set(train.item_id)))
), len(list(set(test.item_id))), len(test)
ts = time.time()
matrix = []
cols = ['date_block_num', 'shop_id', 'item_id']
for i in range(34):
sales = train[train.date_block_num == i]
matrix.append(np.array(list(
product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))
matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols, inplace=True)
time.time() - ts
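# 'matrix' now holds every (date_block_num, shop_id, item_id) combination observed in each month,
# so shop/item pairs with no sales in a month can later receive an explicit zero target.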
train['revenue'] = train['item_price'] * train['item_cnt_day']
ts = time.time()
group = train.groupby(['date_block_num', 'shop_id', 'item_id']).agg(
{'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] = (matrix['item_cnt_month']
.fillna(0)
.clip(0, 20) # NB clip target here
.astype(np.float16))
time.time() - ts
# Test set
# To use time tricks append test pairs to the matrix.
test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
## Write the modified test dataframe for the model
test.to_csv('test_modified.csv', index=True)
ts = time.time()
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True) # 34 month
time.time() - ts
# Shops/Items/Cats features
ts = time.time()
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
time.time() - ts
# Target lags
def lag_feature(df, lag_list, col):
tmp = df[['date_block_num', 'shop_id', 'item_id', col]]
for j in lag_list:
shifted = tmp.copy()
shifted.columns = ['date_block_num',
'shop_id', 'item_id', col +'_lag_' + str(j)]
shifted['date_block_num'] += j
df = pd.merge(df, shifted, on=[
'date_block_num', 'shop_id', 'item_id'], how='left')
return df
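# e.g. lag_feature(matrix, [1], 'item_cnt_month') adds a column 'item_cnt_month_lag_1' holding
# each (shop_id, item_id) pair's value from the previous date_block_num (NaN where no history).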
ts = time.time()
matrix = lag_feature(matrix, [1, 2, 3, 6, 12], 'item_cnt_month')
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num']).agg({'item_cnt_month': ['mean']})
group.columns = ['date_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num'], how='left')
matrix['date_avg_item_cnt'] = matrix['date_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_avg_item_cnt')
matrix.drop(['date_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'item_id']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_item_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1, 2, 3, 6, 12], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_shop_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id'], how='left')
matrix['date_shop_avg_item_cnt'] = matrix['date_shop_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1, 2, 3, 6, 12], 'date_shop_avg_item_cnt')
matrix.drop(['date_shop_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'item_category_id']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_cat_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'item_category_id'], how='left')
matrix['date_cat_avg_item_cnt'] = matrix['date_cat_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_cat_avg_item_cnt')
matrix.drop(['date_cat_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'item_category_id']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_shop_cat_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'shop_id', 'item_category_id'], how='left')
matrix['date_shop_cat_avg_item_cnt'] = matrix['date_shop_cat_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_cat_avg_item_cnt')
matrix.drop(['date_shop_cat_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'type_code']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_shop_type_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'shop_id', 'type_code'], how='left')
matrix['date_shop_type_avg_item_cnt'] = matrix['date_shop_type_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_type_avg_item_cnt')
matrix.drop(['date_shop_type_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'subtype_code']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_shop_subtype_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'shop_id', 'subtype_code'], how='left')
matrix['date_shop_subtype_avg_item_cnt'] = matrix['date_shop_subtype_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_subtype_avg_item_cnt')
matrix.drop(['date_shop_subtype_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'city_code']
).agg({'item_cnt_month': ['mean']})
group.columns = ['date_city_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'city_code'], how='left')
matrix['date_city_avg_item_cnt'] = matrix['date_city_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_city_avg_item_cnt')
matrix.drop(['date_city_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
ts = time.time()
group = matrix.groupby(['date_block_num', 'item_id', 'city_code']).agg(
{'item_cnt_month': ['mean']})
group.columns = ['date_item_city_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'item_id', 'city_code'], how='left')
matrix['date_item_city_avg_item_cnt'] = matrix['date_item_city_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_item_city_avg_item_cnt')
matrix.drop(['date_item_city_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
print(f"{time.time() - ts}")
ts = time.time()
group = matrix.groupby(['date_block_num', 'type_code']
).agg({'item_cnt_month': ['mean']})
group.columns = ['date_type_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'type_code'], how='left')
matrix['date_type_avg_item_cnt'] = matrix['date_type_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_type_avg_item_cnt')
matrix.drop(['date_type_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts
print(f"{time.time() - ts}")
ts = time.time()
group = matrix.groupby(['date_block_num', 'subtype_code']
).agg({'item_cnt_month': ['mean']})
group.columns = ['date_subtype_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=[
'date_block_num', 'subtype_code'], how='left')
matrix['date_subtype_avg_item_cnt'] = matrix['date_subtype_avg_item_cnt'].astype(
np.float16)
matrix = lag_feature(matrix, [1], 'date_subtype_avg_item_cnt')
matrix.drop(['date_subtype_avg_item_cnt'], axis=1, inplace=True)
print(f"{time.time() - ts}")
# Trend features
# Price trend for the last six months
#
ts = time.time()
group = train.groupby(['item_id']).agg({'item_price': ['mean']})
group.columns = ['item_avg_item_price']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['item_id'], how='left')
matrix['item_avg_item_price'] = matrix['item_avg_item_price'].astype(
np.float16)
group = train.groupby(['date_block_num', 'item_id']
).agg({'item_price': ['mean']})
group.columns = ['date_item_avg_item_price']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'item_id'], how='left')
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
from src.utils.config import Config
from src.features import build_features
from dotenv import find_dotenv, load_dotenv
from sklearn.manifold import TSNE
import umap
from sklearn.decomposition import PCA
import numpy as np
from sklearn.preprocessing import RobustScaler as rs
from sklearn.preprocessing import MinMaxScaler as mms
from sklearn.preprocessing import StandardScaler as sd
project_dir=Config.project_dir
def process_data():
labels= pd.read_csv(project_dir / "data/raw/labels.csv")
expression_data = pd.read_csv(project_dir / "data/raw/data.csv")
#rename and Merge labels and features
expression_data.rename({"Unnamed: 0":"sample"}, axis='columns', inplace =True)
labels.rename({"Unnamed: 0":"sample"}, axis='columns', inplace =True)
labled_expression_merged = pd.merge(labels,expression_data,on="sample")
# save
expression_data=expression_data.drop("sample",axis=1)
expression_data.to_csv(project_dir/ "data/processed/expression_data_original.csv")
labels=labels.drop("sample",axis=1)
labels.to_csv(project_dir/ "data/processed/labels.csv")
labled_expression_merged.to_csv(project_dir/ "data/processed/merged_expression_dataset.csv", index=True)
"""[Robust scaling ]
    Robustly rescale the expression levels of each gene by
    applying the formula:
rescaled = (gene_expression - median(gene_expression)) / IQR(gene_expression) where IQR stands for Inter Quartile Range.
"""
expression_data_centered = rs().fit_transform(expression_data)
df_expression_data_centered = pd.DataFrame(expression_data_centered,columns=expression_data.columns)
df_expression_data_centered.to_csv(project_dir/ "data/processed/expression_data_centerted.csv")
"""[standard scaling ]
"""
expression_data_standardized = sd().fit_transform(expression_data)
df_expression_data_standardized = pd.DataFrame(expression_data_standardized,columns=expression_data.columns)
df_expression_data_standardized.to_csv(project_dir/ "data/processed/expression_data_standardized.csv")
y = labels['Class'].values
true_labels = np.array([Config.labels_map[element] for element in y])
df_true_labels = pd.DataFrame(true_labels,columns=["Class"])
df_true_labels.to_csv(project_dir/ "data/processed/true_labels.csv")
expression_level_5000_HGV , features_5000_HGV= build_features.top_k_variance(
expression_data.values,
k=1000,
names= expression_data.columns
)
#--------------------- data reduction -----------------------#
pca_reducer = PCA(n_components=2)
pca_reducer.fit(expression_data )
pc = pca_reducer.transform(expression_data )
X_tsne = TSNE(n_components=2).fit_transform(expression_data)
UMAP_COMPONENTS_REDUCTION = 2
UMAP_COMPONENTS_FEATURES = 20
UMAP_EPOCHS = 2000
manifold_reducer = umap.UMAP(
n_components=UMAP_COMPONENTS_REDUCTION,
n_neighbors=200,
n_epochs=UMAP_EPOCHS,
metric='cosine',
min_dist=0.9)
manifold = manifold_reducer.fit_transform(expression_data)
    # saving transformed data
    components = ["c1", "c2"]
    df_PCA = pd.DataFrame(pc, columns=components)
    df_PCA.to_csv(Config.project_dir / "data/transformed/PCA_reduction.csv")
    df_TSNE = pd.DataFrame(X_tsne, columns=components)
    df_TSNE.to_csv(Config.project_dir / "data/transformed/TSNA_reduction.csv")
    df_UMAP = pd.DataFrame(manifold, columns=components)
    df_UMAP.to_csv(Config.project_dir / "data/transformed/UMAP_reduction.csv")
# saving hvg
df_expression_level_5000_HGV =pd.DataFrame(expression_level_5000_HGV,columns=features_5000_HGV)
df_expression_level_5000_HGV.to_csv(Config.project_dir/ "data/transformed/expression_data_HVG_1000.csv")
def get_data(data_type:str):
"""
    This function imports the processed data.
Args:
data_type (str): ["original","centered","standardized"] the type of data you want to import
Returns:
[tuple]: containing (the merged data , features , labels , true labels )
"""
    merged_data = pd.read_csv(Config.data / f"processed/merged_expression_dataset.csv", index_col=0)
# tallylib/textrank.py
import pandas as pd
import spacy
import en_core_web_sm
import pytextrank
from datetime import datetime
from datetime import timedelta
from django.db import connection
# Local
from tallylib.sql import getYelpReviews
def yelpTrendyPhrases(business_id,
periods=12,
bagging_periods=3,
days_per_period=30,
topk=10
):
'''
1. Get Yelp review texts
    2. Bag review texts within a certain period, e.g. 6 periods (180 days)
3. Use Textrank to get scores
4. Return JSON format for the frontend visualization
'''
# In Google Colab, running 6 period bagging would need:
# CPU times: user 24.5 s, sys: 520 ms, total: 25 s
# Wall time: 25 s
# https://colab.research.google.com/drive/1r4uvFA6RNV35lO3JcYoO5Psz_EVhmNu0
    ## Get reviews from database
current_date = datetime.strptime('2018-11-30', '%Y-%m-%d')
past_date = current_date - timedelta(days=days_per_period * periods -1)
reviews = getYelpReviews(business_id,
starting_date=past_date,
ending_date=current_date)
if reviews == []:
return
df_reviews = pd.DataFrame(reviews, columns=['date', 'text'])
    df_reviews['date'] = pd.to_datetime(df_reviews['date'])
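    # Sketch of the remaining steps from the docstring (bagging + Textrank scoring); the exact
    # pytextrank registration call depends on the installed version, so treat this as an
    # assumption rather than the verified implementation:
    # nlp = en_core_web_sm.load()
    # nlp.add_pipe("textrank")  # pytextrank >= 3.x style registration
    # doc = nlp(" ".join(df_reviews["text"]))
    # top_phrases = [(p.text, p.rank) for p in doc._.phrases[:topk]]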
"""
China National Bureau of Statistics data module
Author: gansaihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from datetime import date
from calendar import monthrange
from cnswd.websource.base import get_page_response, friendly_download
HOST_URL = "http://data.stats.gov.cn/easyquery.htm"
QUARTERLY_SUFFIX_MAPS = {
1: 'A',
2: 'B',
3: 'C',
4: 'D'
}
QUARTERLY_SUFFIX_MAPS_INV = {v: k for k, v in QUARTERLY_SUFFIX_MAPS.items()}
def _extract_date(datestr):
year = int(datestr[:4])
month = 12
if len(datestr) == 5: # quarterly
quarter = int(QUARTERLY_SUFFIX_MAPS_INV.get(datestr[4]))
month = quarter * 3
elif len(datestr) == 6: # monthly
month = int(datestr[4:6])
day = monthrange(year, month)[1]
return date(year=year, month=month, day=day)
def _sanitize_date(datestr, freq):
dt = pd.Timestamp(datestr)
freq = freq.strip().lower()
ret = dt.year
if freq == 'quarterly':
ret = '%d%s' % (ret, QUARTERLY_SUFFIX_MAPS.get(dt.quarter))
elif freq == 'monthly':
        ret = '%d%s' % (ret, str(dt.month).zfill(2))
return ret
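# Worked example (sketch):
#   _sanitize_date('2018-05-20', 'quarterly') -> '2018B'
#   _sanitize_date('2018-05-20', 'monthly')   -> '201805'
#   _extract_date('2018B')                    -> datetime.date(2018, 6, 30)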
def _freq_to_dbcode(freq):
ref = {
'monthly': 'hgyd',
'quarterly': 'hgjd',
'yearly': 'hgnd',
}
return ref.get(freq.strip().lower())
@friendly_download(times=66, duration=None, max_sleep=1)
def fetch_economics(code, start, end, freq):
'''freq = monthly, quarterly, yearly'''
start = _sanitize_date(start, freq)
end = _sanitize_date(end, freq)
date_rng = start + '-' + end
params = {
'm': 'QueryData',
'rowcode': 'zb',
'colcode': 'sj',
'wds': '[]',
'dbcode': _freq_to_dbcode(freq),
'dfwds': '[{"wdcode":"zb","valuecode":"%s"}, {"wdcode":"sj","valuecode": "%s"}]' % (code, date_rng),
}
r = get_page_response(HOST_URL, method='post', params=params)
records = []
labels = ['code', 'asof_date', 'value']
for record in r.json()['returndata']['datanodes']:
val = record['data']
if val['hasdata']:
code = record['wds'][0]['valuecode']
asof_date = record['wds'][1]['valuecode']
records.append((code, _extract_date(asof_date), val['data']))
df = pd.DataFrame.from_records(records, columns=labels)
return df
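# Example call (sketch; the indicator code is a placeholder, real codes come from get_codes()):
# df_cpi = fetch_economics('A01010101', '2015-01', '2018-12', 'monthly')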
def get_codes(freq, node_id='zb'):
'''freq = monthly, quarterly, yearly
public API
'''
return _batch_leaf_codes(freq, node_id)
def get_categories(freq, node_id='zb'):
''' return the categories which are parents
or super-parents codes of series
node_id should be nodes which are super-parents not direct parents of leafs
'''
return _batch_page_codes(freq, node_id)[0]
def _batch_leaf_codes(freq, node_id='zb'):
'''return all the codes of series which are children to the node of node_id
default the root node'''
ret = []
page_codes = _batch_page_codes(freq, node_id)[1]
if page_codes.empty:
page_codes = [node_id]
else:
page_codes = page_codes['id']
for page_code in page_codes:
res = _get_leaf_codes(freq, page_code)
ret.append(res)
if ret:
        ret = pd.concat(ret)
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import (
SparseArray,
SparseDtype,
)
arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6])
arr = SparseArray(arr_data)
class TestGetitem:
def test_getitem(self):
dense = arr.to_dense()
for i in range(len(arr)):
tm.assert_almost_equal(arr[i], dense[i])
tm.assert_almost_equal(arr[-i], dense[-i])
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"slc",
[
np.s_[:],
np.s_[1:10],
np.s_[1:100],
np.s_[10:1],
np.s_[:-3],
np.s_[-5:-4],
np.s_[:-12],
np.s_[-12:],
np.s_[2:],
np.s_[2::3],
np.s_[::2],
np.s_[::-1],
np.s_[::-2],
np.s_[1:6:2],
np.s_[:-6:-2],
],
)
@pytest.mark.parametrize(
"as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []]
)
def test_getslice(self, slc, as_dense):
as_dense = np.array(as_dense)
arr = SparseArray(as_dense)
result = arr[slc]
expected = SparseArray(as_dense[slc])
        tm.assert_sp_array_equal(result, expected)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from mr_clean_func_utils import coerce,get_colname_gen,get_cutoff,ic_vec,is_num,row_req,rows
def validate(df,coerce_numeric,coerce_dt,coerce_categorical): # validates input
assert type(df) is pd.DataFrame
column_dict = {}
for element in coerce_numeric + coerce_dt + coerce_categorical: # these lists must be mutually exclusive
assert type(element) is str
assert not element in column_dict
column_dict[element] = True
def rename_cols(df, col_names): # renames columns
if col_names is not None:
col_list = [*col_names]
col_gen = get_colname_gen(df)
for col_name in col_gen():
if len(col_list) >= len(df.columns):
break
else:
col_list.append(col_name)
else:
col_list = [*df.columns]
for index,col_name in enumerate(col_list):
col_list[index] = col_name.strip().lower().replace(' ','_')
df.columns = col_list
def remove_whitespace(df): # removes whitespace from string columns
for column in (column for column in df if not is_num( df[column] ) ):
df[column] = df[column].str.strip()
yield column
def rename_index(df):
if not (type(df.index) is pd.RangeIndex or type(df.index) is pd.DatetimeIndex):
        df.reset_index(inplace=True)  # reset the index in place
return True
#%%%
def scrub_str_cols(df,column,char_scrub_cutoff):
# Tries to remove common characters from the front and back of the strings in df[column]
if not is_num(df[column]): # if it's not a numeric
char_scrub_cutoff = get_cutoff(column, char_scrub_cutoff)
from_front,from_back = "",""
flag1, flag2 = True,True
capture1,capture2 = ic_vec(df),ic_vec(df) # Boolean column vectors
iteration = 0
get_colname = get_colname_gen(df)
while flag1 or flag2:
valcounts = df.loc[capture1 >= iteration,column].str[0].value_counts()
flag1 = valcounts[0] >= char_scrub_cutoff * rows(df) if len(valcounts)>0 else False
if flag1:
from_front+=valcounts.index[0]
capture1+=df[column].str.startswith(valcounts.index[0])&(capture1 >= iteration)
df.loc[capture1 > iteration,column] = df.loc[capture1 > iteration,column].str[1:]
valcounts = df.loc[capture2 >= iteration,column].str[-1].value_counts()
flag2 = valcounts[0] >= char_scrub_cutoff * rows(df) if len(valcounts)>0 else False
if flag2:
from_back=valcounts.index[0]+from_back
capture2+=df[column].str.endswith(valcounts.index[0])&(capture2 >= iteration)
df.loc[capture2 > iteration,column] = df.loc[capture2 > iteration,column].str[:-1]
iteration+=1
if len(from_front + from_back) > 0:
# Generate unique column names for each appended column
sieve_col_1 = next(get_colname(column+'_scrubf'))
sieve_col_2 = next(get_colname(column+'_scrubb'))
df[sieve_col_1] = capture1 # Add columns
df[sieve_col_2] = capture2 # Add columns
return (from_front, from_back, column,sieve_col_1,sieve_col_2)
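# Example (sketch): for a column like ["$1,200.", "$340.", "$85."] the loop above would scrub the
# shared leading "$" and trailing "." once they clear char_scrub_cutoff, while the generated
# *_scrubf / *_scrubb columns record how many characters were removed from each row.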
#%%
def coerce_col(df, column,
numeric_cutoff, coerce_numeric,
dt_cutoff, coerce_dt, dt_format,
categorical_cutoff,coerce_categorical):
success = True
if column in coerce_numeric:
coerce(df, column,
pd.to_numeric(df[column], errors = 'coerce'))
elif column in coerce_dt:
if dt_format is None:
coerce(df, column,
pd.to_datetime(df[column],errors = 'coerce',infer_datetime_format = True))
else:
coerce(df, column,
pd.to_datetime(df[column],errors = 'coerce',format = dt_format))
elif column in coerce_categorical:
coerce(df, column, df[column].astype('category'))
else:
success = __infer_coerce(df, column,
get_cutoff( column,numeric_cutoff ),
get_cutoff( column,dt_cutoff ),
get_cutoff( column,categorical_cutoff ) )
return success
def __infer_coerce(df, column,
numeric_cutoff,dt_cutoff,categorical_cutoff):
cat_coerced = df[column].astype('category')
num_coerced = pd.to_numeric(df[column], errors = 'coerce')
dt_coerced = pd.to_datetime(df[column],errors = 'coerce',infer_datetime_format = True)
all_cutoffs = [categorical_cutoff,numeric_cutoff,dt_cutoff]
all_coerced = [cat_coerced,num_coerced,dt_coerced]
all_counts = [coerced.count() for coerced in all_coerced]
all_counts[0] = rows(df)-len(all_coerced[0].value_counts())
all_scores = [count-row_req(df,cutoff) for count,cutoff in zip(all_counts,all_cutoffs)]
high_score = max(*all_scores)
for index in range(3):
if all_scores[index] == high_score and \
all_scores[index] >= 0:
coerce(df, column, all_coerced[index])
return True
return False
def preview(df,preview_rows = 5,preview_max_cols = 0):
""" Returns a preview of a dataframe, which contains both header
rows and tail rows.
"""
assert type(df) is pd.DataFrame
if preview_rows <= 0:
preview_rows = 1
initial_max_cols = pd.get_option('display.max_columns')
    pd.set_option('display.max_columns', preview_max_cols)
from time import time
import pandas as pd
from pandas.plotting import parallel_coordinates, andrews_curves
import matplotlib.pyplot as plt
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Inferno
import numpy as np
from scipy.stats import gaussian_kde
from utils import load_config
from gps_analysis import TraceAnalysis, Trace
def gkde(data, gridsize):
"""
:param data: pd timeserie
:return: x grid & gaussian kde series
"""
bw = 1/data.std()
x_grid = np.linspace(data.min(), data.max(), gridsize)
gkde = gaussian_kde(data, bw_method=bw)
gkde_pdf = gkde(x_grid)
return (x_grid, gkde_pdf)
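# Example usage (sketch):
# x_grid, pdf = gkde(pd.Series(np.random.normal(25, 3, 500)), gridsize=100)
# figure().line(x_grid, pdf)  # smoothed density curve for a bokeh plot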
def build_crunch_df(df, result_types):
if not result_types:
return
# df2 = df[df.description.isin(result_types)].pivot_table(
# values=["result"],
# index=["n"],
# columns=["description"],
# aggfunc=np.mean,
# dropna=True,
# )
# df2.result[result_type]
df2 = pd.DataFrame(
data={
result_type: df[df.description == result_type].result
for result_type in result_types
if (df.description == result_type).any()
}
)
return df2
def all_results_speed_density(all_results):
"""crunch data with all ranking results"""
if all_results is None:
return
trace = Trace()
result_types = [r for k,v in trace.ranking_groups.items() for r in v]
# do not plot 0 results:
all_results.astype({'result':'float64'}).dtypes
# density plot:
all_results2 = all_results.reset_index(drop=True)
df = build_crunch_df(all_results2, result_types)
p = figure(x_axis_label="speed (kn)")
try:
for result_type, color in zip(result_types, Inferno[len(result_types)]):
data = df[result_type].dropna()
x, pdf = gkde(data, 100)
p.line(x, pdf, color=color, line_width=3, legend_label=f'gkde density of {result_type}')
except Exception as e:
print(e)
return p
def bokeh_speed(gpsana_client):
p = figure(x_axis_type="datetime", x_axis_label="time")
dfs = pd.DataFrame(index=gpsana_client.ts.index)
dfs["raw_speed"] = gpsana_client.raw_ts
dfs.loc[dfs.raw_speed>55, "raw_speed"] = 55
dfs["speed"] = gpsana_client.ts
dfs["speed_no_doppler"] = gpsana_client.tsp
dfs.loc[dfs.speed_no_doppler > 55, "speed_no_doppler"] = 55
dfs = dfs.reset_index()
source = ColumnDataSource(dfs)
p.line(x='time', y='raw_speed', source=source, color='blue', line_width=2, legend_label='unfiltered raw speed')
p.line(x='time', y='speed', source=source, color='red', legend_label = 'doppler speed')
p.line(x='time', y='speed_no_doppler', source=source, color='orange', legend_label='positional speed')
return p
def bokeh_speed_density(gpsana_client, s):
xs = f"{int(10)}S"
p = figure(x_axis_label="speed (kn)")
dfs = pd.DataFrame(index=gpsana_client.ts.index)
dfs["speed"] = gpsana_client.ts
dfs["speed_xs"] = gpsana_client.ts.rolling(xs).mean()
dfs = dfs.reset_index()
dfs.dropna(inplace=True)
x_grid_xs, pdf_xs = gkde(dfs.speed_xs, 200)
x_grid, pdf = gkde(dfs.speed, 200)
p.line(x_grid, pdf, color='blue', legend_label=f'gkde density of filtered max doppler speeds')
p.line(x_grid_xs, pdf_xs, color='red', legend_label=f'gkde density of v{xs} speeds')
return p
def compare_all_results_density(all_results, gpsana_client, result_types):
if all_results is None:
return
# do not plot 0 results:
all_results.astype({'result':'float64'}).dtypes
# density plot:
all_results2 = all_results.reset_index(drop=True)
df = build_crunch_df(all_results2, result_types)
results = build_crunch_df(gpsana_client.gpx_results, result_types)
p = figure(x_axis_label='speed (kn)')
#result = gpsana_client.gpx_results[gpsana_client.gpx_results.description.isin(result_types)].result
#result_list = list(pd.Series.to_numpy(result))
colors = Inferno[len(result_types)+1]
for result_type, color in zip(result_types, colors):
data = df[result_type].dropna()
data = data[data>0]
result = results[result_type].dropna()
result = result[result>0]
if len(data)>1 and len(results)>0:
x, pdf = gkde(data, 100)
h = pdf.max()
p.line(x, pdf, color=color, line_width=3, legend_label=f'gkde density of {result_type}')
for r in pd.Series.to_numpy(result):
p.line([r, r], [0, h], color=color, line_width=2)
# for r, c in zip(result_list, colors[1:]):
# p.line([r, r], [0, h], color=c, line_width=2)
return p
def process_config_plot(all_results, config_plot_file):
"""deprecated"""
if all_results is None:
return
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
config_plot = load_config(config_plot_file)
# do not plot 0 results:
all_results.astype({'result':'float64'}).dtypes
all_results.loc[all_results.result < 10, 'result'] = np.nan
result_types = config_plot.get('distribution', [])
# density plot:
all_results2 = all_results.reset_index(drop=True)
df = build_crunch_df(all_results2, result_types)
df.plot.kde(ax=ax1)
ax1.set_title("speed density")
ax1.set_xlabel("speed (kn)")
# parallel coordinates plot:
all_results2 = all_results.set_index(pd.to_datetime(all_results.date))
all_results2 = all_results2.set_index(all_results2.index.year)
df = build_crunch_df(all_results2, result_types)
df = df.reset_index()
df0 = df.copy()
    parallel_coordinates(df, 'date', colormap="winter", ax=ax2)
import os, re, json, sys, random, copy, argparse, torch
import numpy as np
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
from util import *
import warnings
warnings.filterwarnings('ignore')
# keep only the rows of df_a that do not appear in df_b (matched on cols)
def RemoveDuplicates(df_a, df_b, cols):
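    # df_b is appended twice below so every one of its rows is guaranteed to be a duplicate;
    # drop_duplicates(keep=False) then drops them all, leaving only the rows unique to df_a.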
df = pd.concat([df_a, df_b[cols], df_b[cols]]).drop_duplicates(subset=cols, keep=False, ignore_index=True)
return df
# preprocessing for IEDB MS data
def MSPreprocess(filename, min_peptide_length, max_peptide_length):
df = pd.read_csv(filename)
# rename columns
rename_columns = ["%s - %s"%(i.split(".")[0], df.iloc[0][i]) for i in df.columns]
df = df.rename(columns={df.columns[i]: rename_columns[i] for i in range(len(rename_columns))})
df = df.drop(0)
# filter
df = df[["Reference - MHC ligand ID",
"Reference - PubMed ID",
"Epitope - Description",
"Epitope - Starting Position",
"Epitope - Ending Position",
"Epitope - Parent Protein Accession",
"Antigen Processing Cells - Cell Tissue Type",
"Antigen Processing Cells - Cell Type",
"MHC - Allele Name",
"Host - MHC Types Present"]]
# epitope length
df["Epitope - Length"] = df["Epitope - Description"].apply(lambda x: len(x))
df = df[(df["Epitope - Length"] >= min_peptide_length) & (df["Epitope - Length"] <= max_peptide_length)]
# mono-allelic
df = df[df["MHC - Allele Name"].str.match(r'^HLA-[A/B/C]\*\d+\:\d+$')]
df["MHC - Allele Name"] = df["MHC - Allele Name"].apply(lambda x: x.replace("HLA-",""))
return df
# preprocessing for IEDB assay data
def AssayPreprocess(filename, species, min_peptide_length, max_peptide_length):
df = pd.read_csv(filename, sep='\t')
df = df[df["species"] == species]
df = df[df["mhc"].str.contains("HLA-[ABC]\*\d+\:\d+")]
df["mhc"] = df["mhc"].apply(lambda x: x.replace("HLA-",""))
df = df[(df["peptide_length"] >= min_peptide_length) & (df["peptide_length"] <= max_peptide_length)]
df["value"] = df["meas"].apply(lambda x: max(1 - np.log10(x)/np.log10(50000), 0))
df["bind"] = (df["meas"] <= 500).astype(int)
df["source"] = "assay"
return df
# build hit dataframe
def BuildHit(df):
hit_df = df[[
"MHC - Allele Name",
"Epitope - Parent Protein Accession",
"Epitope - Starting Position",
"Epitope - Length",
"Epitope - Description"
]]
hit_df = hit_df.rename(columns={
"MHC - Allele Name": "mhc",
"Epitope - Parent Protein Accession": "protein",
"Epitope - Starting Position": "start_pos",
"Epitope - Length": "peptide_length",
"Epitope - Description": "sequence"
})
hit_df["meas"] = 1
hit_df["value"] = 1
hit_df["bind"] = 1
hit_df["source"] = "MS"
return hit_df
# build decoy from the same protein of the hit sample
def BuildProtDecoy(prot_dict, prot_len_dict, df, len_dict):
decoy_list = list()
alleles = list(df['mhc'].unique())
for allele in tqdm(alleles):
temp_df = df[(df['mhc'] == allele) & (df['bind'] == 1)]
prots = list(temp_df['protein'].unique())
for prot in prots:
pos_num = temp_df[temp_df['protein'] == prot].shape[0]
start_pos_list = list(temp_df[temp_df['protein'] == prot]['start_pos'].unique())
for length, multiple in len_dict.items():
decoy_num = multiple * pos_num
try:
candidate_pos = [i for i in range(prot_len_dict[prot] - length)
if i not in start_pos_list]
except:
continue
candidate_pos = random.sample(candidate_pos, min(len(candidate_pos), decoy_num))
for pos in candidate_pos:
d = {'mhc': allele,
'protein': prot,
'start_pos': pos,
'peptide_length': length,
'sequence': prot_dict[prot][pos: pos+length]}
decoy_list.append(d)
decoy_df = pd.DataFrame(decoy_list)
decoy_df = decoy_df.drop_duplicates(ignore_index=True)
decoy_df["meas"] = 50000
decoy_df["value"] = 0
decoy_df['bind'] = 0
decoy_df['source'] = 'protein_decoy'
return decoy_df
# build decoy from random peptides
def BuildRandomDecoy(prot_dict, prot_len_dict, df, len_dict):
decoy_list = list()
prot_list = list(prot_dict.keys())
alleles = list(df['mhc'].unique())
for allele in tqdm(alleles):
pos_num = df.loc[(df['mhc'] == allele) & (df['bind'] == 1)].shape[0]
for length, multiple in len_dict.items():
decoy_num = multiple * pos_num
for i in range(decoy_num):
choose = False
while not choose:
prot_id = random.choice(prot_list)
try:
start_pos = random.choice(range(prot_len_dict[prot_id]-length))
choose = True
except:
choose = False
d = {'mhc': allele,
'protein': prot_id,
'start_pos': start_pos,
'peptide_length': length,
'sequence': prot_dict[prot_id][start_pos: start_pos+length]}
decoy_list.append(d)
decoy_df = pd.DataFrame(decoy_list)
decoy_df = decoy_df.drop_duplicates(ignore_index=True)
decoy_df["meas"] = 50000
decoy_df["value"] = 0
decoy_df['bind'] = 0
decoy_df['source'] = 'random_decoy'
return decoy_df
def ArgumentParser():
description = """
Build dataset for downsampling model
Input contains:
1. MS data (.csv)
2. Assay data (.txt)
3. UniProt data (.json)
Arguments:
1. train_valid_prop
2. random_seed
3. encoding_method: OneHot or Blosum
4. train_decoy_ratio
5. test_decoy_ratio
Output:
1. dataframe/[train_hit / train_decoy_n / valid / test].csv
2. [encoding_method]/[train_hit / train_decoy_n / valid / test].pt
the shape of x: (encoding size, 15(epitope length))
the shape of y: (3,) ([index, classification value, regression value])
"""
parser = argparse.ArgumentParser(prog="BuildDatasetDownsampling", description=description, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--ms_data', required=True, help='the csv filename of MS data')
parser.add_argument('--assay_data', required=True, help='the txt filename of assay data')
parser.add_argument('--uniprot', required=True, help='the json filename of UniProt data')
parser.add_argument('--train_valid_prop', required=False, type=float, default=0.95, help='default = 0.95')
parser.add_argument('--random_seed', required=False, type=int, default=0, help='default = 0')
parser.add_argument('--encoding_method', required=True, help='onehot or blosum')
parser.add_argument('--train_decoy_times', required=True, type=int, help='decoy size of each length / hit size of training')
parser.add_argument('--test_decoy_times', required=True, type=int, help='decoy size of each length / hit size of testing')
parser.add_argument('--output_dir', required=True, help='the dirname of output')
return parser
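# Example invocation (sketch; the script name, file names and decoy multipliers are assumptions):
# python build_dataset.py --ms_data ms_ligands.csv --assay_data bdata.txt --uniprot uniprot.json \
#     --encoding_method onehot --train_decoy_times 5 --test_decoy_times 5 --output_dir ./dataset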
if __name__ == "__main__":
"""""""""""""""""""""""""""""""""
# Loading data and arguments
"""""""""""""""""""""""""""""""""
print("Loading data and arguemnts...")
min_peptide_length, max_peptide_length = 8, 15
args = ArgumentParser().parse_args()
# IEDB data
ms_df = MSPreprocess(args.ms_data, min_peptide_length, max_peptide_length)
assay_df = AssayPreprocess(args.assay_data, "human", min_peptide_length, max_peptide_length)
# UniProt data
uniprot_dict = json.load(open(args.uniprot, 'r'))
uniprot_len_dict = dict()
for k, v in uniprot_dict.items():
uniprot_len_dict[k] = len(v)
# Basic arguments
unique_columns = ["mhc", "sequence"] # for removing duplicates
test_ref_id = 31844290
train_valid_prop = args.train_valid_prop
random_seed = args.random_seed
encoding_method = args.encoding_method
# Decoy arguments
train_decoy_times = args.train_decoy_times
test_decoy_times = args.test_decoy_times
prot_decoy_len_dict = dict({i:2 for i in range(8, 16)})
train_random_decoy_len_dict = dict({i: train_decoy_times for i in range(8, 16)})
test_random_decoy_len_dict = dict({i: test_decoy_times for i in range(8, 16)})
# output directory
output_dir = args.output_dir
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
if not os.path.isdir('%s/dataframe'%output_dir):
os.mkdir('%s/dataframe'%output_dir)
if not os.path.isdir('%s/%s'%(output_dir, encoding_method)):
os.mkdir('%s/%s'%(output_dir, encoding_method))
"""""""""""""""""""""""""""""""""
# Build MS positive data
"""""""""""""""""""""""""""""""""
print("Build MS positive data...")
# split test df
test_df = ms_df[ms_df["Reference - PubMed ID"] == test_ref_id]
non_test_df = ms_df.drop(test_df.index)
# df preprocess
test_df = BuildHit(test_df)
non_test_df = BuildHit(non_test_df)
# drop duplicates
non_test_df = non_test_df.drop_duplicates(subset=unique_columns, ignore_index=True)
test_df = test_df.drop_duplicates(subset=unique_columns, ignore_index=True)
# test_df = test_df - non_test_df
test_df = RemoveDuplicates(test_df, non_test_df, unique_columns)
"""""""""""""""""""""""""""""""""
# Build MS decoy data
"""""""""""""""""""""""""""""""""
print("Build MS decoy data...")
test_random_decoy_df = BuildRandomDecoy(uniprot_dict, uniprot_len_dict, test_df, test_random_decoy_len_dict)
test_prot_decoy_df = BuildProtDecoy(uniprot_dict, uniprot_len_dict, test_df, prot_decoy_len_dict)
non_test_random_decoy_df = BuildRandomDecoy(uniprot_dict, uniprot_len_dict, non_test_df, train_random_decoy_len_dict)
non_test_prot_decoy_df = BuildProtDecoy(uniprot_dict, uniprot_len_dict, non_test_df, prot_decoy_len_dict)
test_decoy_df = pd.concat([test_random_decoy_df, test_prot_decoy_df]).drop_duplicates(ignore_index=True)
non_test_decoy_df = pd.concat([non_test_random_decoy_df, non_test_prot_decoy_df]).drop_duplicates(ignore_index=True)
# test_decoy_df = test_decoy_df - test_df - non_test_df
print("Remove duplicates of test_decoy_df...")
print("before removing, data size = ", test_decoy_df.shape)
test_decoy_df = RemoveDuplicates(test_decoy_df, test_df, unique_columns)
test_decoy_df = RemoveDuplicates(test_decoy_df, non_test_df, unique_columns)
print("after removing, data size = ", test_decoy_df.shape)
# non_test_decoy_df = non_test_decoy_df - test_df - non_test_df - test_decoy_df
print("Remove duplicates of non_test_decoy_df...")
print("before removing, data size = ", non_test_decoy_df.shape)
non_test_decoy_df = RemoveDuplicates(non_test_decoy_df, test_df, unique_columns)
non_test_decoy_df = RemoveDuplicates(non_test_decoy_df, non_test_df, unique_columns)
non_test_decoy_df = RemoveDuplicates(non_test_decoy_df, test_decoy_df, unique_columns)
print("after removing, data size = ", non_test_decoy_df.shape)
"""""""""""""""""""""""""""""""""
# Split training and validation data
"""""""""""""""""""""""""""""""""
print("Split training and validation data...")
# MS data
train_df = non_test_df.sample(frac=train_valid_prop, random_state=random_seed)
valid_df = non_test_df.drop(train_df.index).reset_index(drop=True)
train_df = train_df.reset_index(drop=True)
train_decoy_df = non_test_decoy_df.sample(frac=train_valid_prop, random_state=random_seed)
valid_decoy_df = non_test_decoy_df.drop(train_decoy_df.index).reset_index(drop=True)
train_decoy_df = train_decoy_df.reset_index(drop=True)
# assay data
assay_train_df = assay_df.sample(frac=train_valid_prop, random_state=random_seed)
assay_valid_df = assay_df.drop(assay_train_df.index).reset_index(drop=True)
assay_train_df = assay_train_df.reset_index(drop=True)
"""""""""""""""""""""""""""""""""
# Save dataframe and dataset
"""""""""""""""""""""""""""""""""
print("Save dataframe and dataset...")
common_columns = ["sequence", "peptide_length", "mhc", "meas", "value", "bind", "source"]
# train_hit (MS + assay)
print("Current: train_hit...")
train_hit_df = pd.concat([train_df[common_columns], assay_train_df[common_columns]], ignore_index=True)
train_hit_df.to_csv('%s/dataframe/train_hit.csv'%output_dir)
train_hit_num = train_hit_df.shape[0]
dataset = BuildDataset(train_hit_df, encoding_method, max_peptide_length)
torch.save(dataset, "%s/%s/train_hit.pt"%(output_dir, encoding_method))
# train_decoy
decoy_file_num = int(np.floor(train_decoy_df.shape[0] / train_df.shape[0]))
decoy_idx = train_decoy_df.index.to_numpy()
np.random.seed(random_seed)
np.random.shuffle(decoy_idx)
decoy_idx_list = np.array_split(decoy_idx, decoy_file_num)
print("Number of decoy file: ", decoy_file_num)
for i in range(decoy_file_num):
print("Current: train_decoy_%d..."%(i+1))
# make sure the decoy index within the number limitation(16777216) of pytorch tensor
# decoy_1 and decoy_21 have the same starting index
if i % 20 == 0:
start_idx = train_hit_num
temp_decoy_df = train_decoy_df.loc[decoy_idx_list[i], common_columns]
temp_decoy_df = temp_decoy_df.set_index(pd.Series(range(start_idx, temp_decoy_df.shape[0]+start_idx)))
start_idx += temp_decoy_df.shape[0]
temp_decoy_df.to_csv('%s/dataframe/train_decoy_%d.csv'%(output_dir, i+1))
dataset = BuildDataset(temp_decoy_df, encoding_method, max_peptide_length)
torch.save(dataset, '%s/%s/train_decoy_%d.pt'%(output_dir, encoding_method, i+1))
# validation
print("Current: valid...")
valid_df = pd.concat([valid_df[common_columns], valid_decoy_df[common_columns], assay_valid_df[common_columns]], ignore_index=True)
valid_df.to_csv('%s/dataframe/valid.csv'%output_dir)
dataset = BuildDataset(valid_df, encoding_method, max_peptide_length)
torch.save(dataset, "%s/%s/valid.pt"%(output_dir, encoding_method))
# testing
print("Current: test...")
    test_df = pd.concat([test_df[common_columns], test_decoy_df[common_columns]], ignore_index=True)
from contextlib import contextmanager
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import make_transient
import datetime
from unittest.mock import Mock
from numpy.testing import assert_array_almost_equal
import pandas
from triage.component.results_schema import TestPrediction, Matrix, Model
from triage.component.catwalk.storage import TestMatrixType
from triage.component.catwalk.db import ensure_db
from tests.results_tests.factories import (
MatrixFactory,
ModelFactory,
PredictionFactory,
init_engine,
session as factory_session
)
from triage.database_reflection import table_has_data
from triage.component.catwalk.predictors import Predictor
from tests.utils import (
MockTrainedModel,
matrix_creator,
matrix_metadata_creator,
get_matrix_store,
rig_engines,
)
import pytest
with_matrix_types = pytest.mark.parametrize(
('matrix_type',),
[
('train',),
('test',),
],
)
MODEL_RANDOM_SEED = 123456
@contextmanager
def prepare():
with rig_engines() as (db_engine, project_storage):
train_matrix_uuid = "1234"
try:
session = sessionmaker(db_engine)()
session.add(Matrix(matrix_uuid=train_matrix_uuid))
# Create the fake trained model and store in db
trained_model = MockTrainedModel()
model_hash = "abcd"
project_storage.model_storage_engine().write(trained_model, model_hash)
db_model = Model(
model_hash=model_hash,
train_matrix_uuid=train_matrix_uuid,
random_seed=MODEL_RANDOM_SEED
)
session.add(db_model)
session.commit()
yield project_storage, db_engine, db_model.model_id
finally:
session.close()
@pytest.fixture(name='predict_setup_args', scope='function')
def fixture_predict_setup_args():
with prepare() as predict_setup_args:
yield predict_setup_args
@pytest.fixture(name='predictor', scope='function')
def predictor(predict_setup_args):
(project_storage, db_engine, model_id) = predict_setup_args
return Predictor(project_storage.model_storage_engine(), db_engine, rank_order='worst')
@pytest.fixture(name='predict_proba', scope='function')
def prediction_results(matrix_type, predictor, predict_setup_args):
(project_storage, db_engine, model_id) = predict_setup_args
dayone = datetime.datetime(2011, 1, 1)
daytwo = datetime.datetime(2011, 1, 2)
source_dict = {
"entity_id": [1, 2, 3, 1, 2, 3],
"as_of_date": [dayone, dayone, dayone, daytwo, daytwo, daytwo],
"feature_one": [3] * 6,
"feature_two": [5] * 6,
"label": [True, False] * 3
}
    matrix = pandas.DataFrame.from_dict(source_dict)
import logging
from typing import List, Dict, Tuple
import numpy as np
import pandas as pd
from message_ix.utils import make_df
from pandas.io.json import json_normalize
from d2ix import Data, ModelPar, RawData
from d2ix.util import split_columns
from d2ix.util.acitve_year_vector import get_act_year_vector, get_years_no_hist_cap
logger = logging.getLogger(__name__)
YearVector = List[int]
def add_technology(data: Data, model_par: ModelPar, first_model_year: int, active_years: YearVector,
historical_years: YearVector, duration_period_sum: pd.DataFrame, loc: str, par: str,
slack: bool = False) -> ModelPar:
if slack is True:
technology, technology_exist = _get_slack_techs(data, loc, par)
else:
technology, technology_exist = _get_location_techs(data, loc, par)
if technology_exist:
for tech in technology.keys():
params: Dict = _get_df_tech(technology, tech)
tech_hist = model_par.get('historical_new_capacity')
years_no_hist_cap = get_years_no_hist_cap(loc, tech, historical_years, tech_hist)
tech_parameters = _get_active_model_par(data, params)
for tech_par in tech_parameters:
model_par[tech_par] = _add_parameter(model_par, params, tech_par, tech, loc, active_years,
first_model_year, duration_period_sum, years_no_hist_cap)
return model_par
def _add_parameter(model_par: ModelPar, params: Dict[str, pd.DataFrame], tech_par: str, tech: str, loc: str,
active_years: YearVector, first_model_year: int, duration_period_sum: pd.DataFrame,
years_no_hist_cap: YearVector) -> pd.DataFrame:
df = model_par[tech_par]
# emission helper
_emissions_is_list = _check_emissions_is_list(params)
# single input - double output
if isinstance(params['in_out'].loc['output', 'level'], list) and tech_par == 'output':
_level = params['in_out'].loc['output', 'level']
_com = params['in_out'].loc['output', 'commodity']
_df_out = params['year_vtg'][
(params['year_vtg']['par_name'] == 'output') & (params['year_vtg']['par'] == 'value')]
_df_out_val = pd.DataFrame(_df_out.val.values.tolist(), index=_df_out.index)
_df_list = []
for _out in range(len(_com)):
params['year_vtg'].loc[_df_out_val.index, 'val'] = _df_out_val[_out]
params['in_out'].loc['output', 'level'] = _level[_out]
params['in_out'].loc['output', 'commodity'] = _com[_out]
df_base_dict = _create_parameter_df(params, tech_par, df, first_model_year, active_years,
duration_period_sum, years_no_hist_cap)
_df_list.append(df_base_dict)
df_base_dict = pd.concat(_df_list, ignore_index=True)
# emissions: C02 and CH4
elif _emissions_is_list and tech_par == 'emission_factor':
_emission = params['others'].loc['emission'].val
_df_em = params['year_vtg'][
(params['year_vtg']['par_name'] == 'emission_factor') & (params['year_vtg']['par'] == 'value')]
_df_em_val = pd.DataFrame(_df_em.val.values.tolist(), index=_df_em.index)
_df_list = []
for _emi in range(len(_emission)):
params['others'].loc['emission'].val = _emission[_emi]
params['year_vtg'].loc[_df_em_val.index, 'val'] = _df_em_val[_emi]
df_base_dict = _create_parameter_df(params, tech_par, df, first_model_year, active_years,
duration_period_sum, years_no_hist_cap)
_df_list.append(df_base_dict)
df_base_dict = pd.concat(_df_list, ignore_index=True)
else:
df_base_dict = _create_parameter_df(params, tech_par, df, first_model_year, active_years, duration_period_sum,
years_no_hist_cap)
model = pd.concat([df, df_base_dict])
logger.debug(f'Create parameter in location \'{loc}\' for \'{tech}\': \'{tech_par}\'')
return model
def _create_parameter_df(params: Dict[str, pd.DataFrame], model_par: str, df: pd.DataFrame, first_model_year: int,
active_years: YearVector, duration_period_sum: pd.DataFrame,
years_no_hist_cap: YearVector) -> pd.DataFrame:
model_par_vtg = params['year_vtg'][
(params['year_vtg']['par_name'] == model_par) & (~params['year_vtg']['year_vtg'].isin(years_no_hist_cap))]
model_par_act = params['year_vtg'][
(params['year_vtg']['par_name'] == model_par) & (params['year_vtg']['year_vtg'].isin(active_years))]
# fill DataFrame
base_dict = dict.fromkeys(df.columns)
# base_dict = df.to_dict()
keys = base_dict.keys()
for i in keys:
if i in params['others'].index:
# load data not depends on year_vtg or year_act
base_dict[i] = params['others'].loc[i].val
elif i in params['in_out'].columns:
# load output and/or input data
base_dict[i] = params['in_out'].loc[model_par][i]
else:
if ('year_act' in keys and 'year_vtg' in keys) or ('year_vtg' in keys):
# parameter depends on year_vtg or (year_act and year_vtg)
if i == 'year_act':
# load data for year tuples (year_vtg, year_act)
base_dict.update(
_create_dict_year_act(params, model_par, base_dict, first_model_year, duration_period_sum,
years_no_hist_cap))
elif i == 'year_vtg' and 'year_act' not in keys:
# load year_vtg data if only depends on year_vtg
base_dict[i] = model_par_vtg.year_vtg[model_par_vtg.par == 'value'].tolist()
elif (i == 'unit' or i == 'value') and (
'year_act' not in keys):
# load value and unit data
base_dict[i] = model_par_vtg.val[model_par_vtg.par == i].tolist()
if i == 'year_act' and 'year_vtg' not in keys:
# parameter depends only on year_act
base_dict['year_act'] = model_par_act.year_vtg[model_par_act.par == 'value'].tolist()
base_dict['unit'] = model_par_act.val[model_par_act.par == 'unit'].tolist()
base_dict['value'] = model_par_act.val[model_par_act.par == 'value'].tolist()
df = pd.DataFrame(base_dict)
if 'additional_pars' in params['others'].index:
add_pars = params['others'].loc['additional_pars', 'val']
if [k for k in add_pars if model_par in k]:
df = _calc_delta_change(active_years, df, model_par, add_pars)
return df
def _create_dict_year_act(params: Dict[str, pd.DataFrame], model_par: str, base_dict: Dict, first_model_year: int,
duration_period_sum: pd.DataFrame, years_no_hist_cap: List[int]) -> Dict:
tec_life = params['year_vtg'][params['year_vtg'].par_name == 'technical_lifetime']
life_val_unit = tec_life[tec_life.par == 'value']
life_val_unit = life_val_unit.assign(unit=tec_life[tec_life.par == 'unit']['val'].values)
_year_vtg = []
_year_act = []
_value = []
_unit = []
_par_data = params['year_vtg'][params['year_vtg'].par_name == model_par].copy()
_par_data_val = _par_data[_par_data['par'] == 'value'].set_index('year_vtg').to_dict(orient='index')
_par_data_unit = _par_data[_par_data['par'] == 'unit'].set_index('year_vtg').to_dict(orient='index')
tech_years = [i for i in life_val_unit['year_vtg'].values if i not in years_no_hist_cap]
last_tech_year = tech_years[-1]
for y in tech_years:
year_life_time = list(life_val_unit[life_val_unit.year_vtg == y]['val'])[0]
year_vec = get_act_year_vector(duration_period_sum, y, year_life_time, first_model_year, last_tech_year,
years_no_hist_cap)
_year_vtg.extend(year_vec.vintage_years)
_year_act.extend(year_vec.act_years)
_value.extend(len(year_vec.vintage_years) * [_par_data_val[y]['val']])
_unit.extend(len(year_vec.vintage_years) * [_par_data_unit[y]['val']])
base_dict['year_vtg'] = _year_vtg
base_dict['year_act'] = _year_act
base_dict['value'] = _value
base_dict['unit'] = _unit
return base_dict
def _get_active_model_par(data: Data, _param: Dict[str, pd.DataFrame]) -> List[str]:
tech_model_par = ['technology']
tech_model_par.extend(_param['in_out'].index.tolist())
tech_model_par.extend(_param['year_vtg'].par_name[~_param['year_vtg'].par_name.duplicated()].values.tolist())
tech_model_par = sorted(list(set(tech_model_par)))
tech_model_par = [model_par for model_par in data['technology_parameter'] if model_par in tech_model_par]
return tech_model_par
def _calc_delta_change(active_years: YearVector, df: pd.DataFrame, par: str,
add_pars: Dict[str, float]) -> pd.DataFrame:
reference_year = __get_ref_year(df, active_years)
if f'd_{par}_vtg' in add_pars.keys():
df = _comp_int(reference_year, df, par, add_pars, 'vtg')
if f'd_{par}_act' in add_pars.keys():
df = _comp_int(reference_year, df, par, add_pars, 'act')
return df
def add_reliability_flexibility_parameter(data: Data, model_par: ModelPar,
raw_data: RawData) -> Dict[str, pd.DataFrame]:
rel_flex = raw_data['base_input']['rel_and_flex']
model = {}
_rating_bin = []
_reliability_factor = []
_flexibility_factor = []
rating_bin_unit = data['units']['rating_bin']['unit']
reliability_factor_unit = data['units']['reliability_factor']['unit']
flexibility_factor_unit = data['units']['flexibility_factor']['unit']
output: pd.DataFrame = model_par['output'].copy()
for i in rel_flex.index:
node = rel_flex.at[i, 'node']
technology = rel_flex.at[i, 'technology']
mode = data['technology'][technology]['mode']
commodity = rel_flex.at[i, 'commodity']
level = rel_flex.at[i, 'level']
time = rel_flex.at[i, 'time']
rating = rel_flex.at[i, 'rating']
logger.debug(f'Create reliability flexibility parameters for {technology} in {node}')
rating_bin_value = rel_flex.at[i, 'rating_bin']
reliability_factor_value = rel_flex.at[i, 'reliability_factor']
flexibility_factor_value = rel_flex.at[i, 'flexibility_factor']
_output_technology = output[output['technology'] == technology]
_model_years = list(_output_technology['year_act'].unique())
_active_years = list(_output_technology['year_act'])
_vintage_years = list(_output_technology['year_vtg'])
base_par = pd.DataFrame(
{'node': node, 'technology': technology, 'year_act': _model_years, 'commodity': commodity, 'level': level,
'time': time, 'rating': rating})
_rating_bin.append(make_df(base_par, value=rating_bin_value, unit=rating_bin_unit))
_reliability_factor.append(make_df(base_par, value=reliability_factor_value, unit=reliability_factor_unit))
base_flex = pd.DataFrame(
{'node_loc': node, 'technology': technology, 'year_act': _active_years, 'year_vtg': _vintage_years,
'commodity': commodity, 'level': level, 'mode': mode, 'time': time, 'rating': rating})
_flexibility_factor.append(make_df(base_flex, value=flexibility_factor_value, unit=flexibility_factor_unit))
rating_bin = pd.concat(_rating_bin, sort=False, ignore_index=True)
rating_bin['year_act'] = rating_bin['year_act'].astype(int)
model['rating_bin'] = rating_bin
reliability_factor = pd.concat(_reliability_factor, sort=False, ignore_index=True)
reliability_factor['year_act'] = reliability_factor['year_act'].astype(int)
model['reliability_factor'] = reliability_factor
flexibility_factor = pd.concat(_flexibility_factor, sort=False, ignore_index=True)
flexibility_factor['year_act'] = flexibility_factor['year_act'].astype(int)
flexibility_factor['year_vtg'] = flexibility_factor['year_vtg'].astype(int)
model['flexibility_factor'] = flexibility_factor
return model
def create_renewable_potential(raw_data: RawData, data: Data, active_years: YearVector) -> Dict[str, pd.DataFrame]:
renewable_potential = raw_data['base_input']['renewable_potential']
model = {}
re_year = renewable_potential.iloc[
np.repeat(np.arange(len(renewable_potential)), len(active_years))]
re_year = re_year.assign(
year=active_years * renewable_potential.shape[0])
re_potential = re_year.copy()
re_potential['unit'] = data['units']['renewable_potential']['unit']
re_potential['value'] = re_potential['potential']
model['renewable_potential'] = re_potential[['commodity', 'level', 'grade', 'value', 'node', 'year', 'unit']]
re_cap_factor = re_year.copy()
re_cap_factor['unit'] = data['units']['renewable_capacity_factor']['unit']
re_cap_factor['value'] = re_cap_factor['capacity_factor']
model['renewable_capacity_factor'] = re_cap_factor[['commodity', 'level', 'grade', 'value', 'node', 'year', 'unit']]
return model
def change_emission_factor(raw_data: RawData, model_par: ModelPar) -> Dict[str, pd.DataFrame]:
emissions = raw_data['base_input']['emissions']
emission_factor: pd.DataFrame = model_par['emission_factor']
model = {}
def apply_change(row):
loc = emissions['node_loc'] == row['node_loc']
tech = emissions['technology'] == row['technology']
year = emissions['year_act'] == row['year_act']
emission = emissions['emission'] == row['emission']
ef = emissions[loc & tech & year & emission]
if not ef.empty:
value = ef['value'].values[0]
else:
value = row['value']
return value
emission_factor['value'] = emission_factor.apply(apply_change, axis=1)
model['emission_factor'] = emission_factor
return model
# help functions
def _check_emissions_is_list(params: Dict[str, pd.DataFrame]) -> bool:
if 'emission' in params['others'].index:
if isinstance(params['others'].loc['emission'].val, list):
_emissions_is_list = True
else:
_emissions_is_list = False
else:
_emissions_is_list = False
return _emissions_is_list
def _get_location_techs(data: Data, loc: str, par: str) -> Tuple[Dict[str, dict], bool]:
loc_techs = data['locations'][loc].get(par)
if loc_techs:
techs = [*loc_techs.keys()]
technology = {k: v for k, v in data['technology'].items() if k in techs}
technology = _override_techs(technology, loc_techs, techs)
technology_exist = True
else:
technology = {}
technology_exist = False
return technology, technology_exist
def _override_techs(technology: Dict[str, dict], loc_techs: Dict[str, dict], techs: List[str]) -> Dict[str, dict]:
override = {t: {k: v for k, v in loc_techs[t]['override'].items()} for t in techs}
for t in techs:
technology[t].update(override[t])
return technology
def _get_slack_techs(data, loc: str, par: str) -> Tuple[Dict[str, dict], bool]:
commodity = [data[par][loc]['year'][i].keys() for i in data[par][loc]['year'].keys()]
commodity = [i for l in commodity for i in l]
commodity = sorted(list(set(commodity)))
technology = {}
for c in commodity:
technology['slack_' + c] = data['technology']['slack_' + c]
technology['slack_' + c]['node_loc'] = loc
technology['slack_' + c]['node_dest'] = loc
technology['slack_' + c]['node_origin'] = loc
technology_exist = True
return technology, technology_exist
def _get_df_tech(technology: Dict[str, dict], t: str) -> Dict[str, dict]:
_df = pd.DataFrame.from_dict(technology)
_param = {}
# create pandas Series for a given technology from a dict
df = _df[t].dropna()
if 'year_vtg' in df.keys():
_param['year_vtg'] = __get_df_year(df, year_type='year_vtg')
df = df.drop('year_vtg')
if 'input' in df.index:
in_out = {'input': df['input'],
'output': df['output']}
df_par_in_out = pd.DataFrame.from_dict(in_out, orient='index')
df = df.drop(['input', 'output'])
else:
in_out = {'output': df['output']}
df_par_in_out = | pd.DataFrame.from_dict(in_out, orient='index') | pandas.DataFrame.from_dict |
"""A container to store training, validation and test results. """
from typing import Callable, Dict, Iterable, Optional
import numpy as np
import pandas as pd
import ivory.core.collections
from ivory.core.run import Run
from ivory.core.state import State
class Results(ivory.core.collections.Dict, State):
"""Results callback stores training, validation and test results.
Each result is `ivory.core.collections.Dict` type that has `index`,
`output`, and `target` array.
To get `target` array of validation, use
target = results.val.target
Attributes:
train (Dict): Train results.
val (Dict): Validation results.
test (Dict): Test results.
"""
def __call__(self):
return self.index, self.output, self.target
def reset(self):
self.index = None
self.output = None
self.target = None
def on_train_begin(self, run: Run):
self.reset()
def on_test_begin(self, run: Run):
self.reset()
def step(self, index, output, target=None):
self.index = index
self.output = output
self.target = target
def on_train_end(self, run: Run):
self["train"] = self.result_dict()
self.reset()
def on_val_end(self, run: Run):
self["val"] = self.result_dict()
self.reset()
def on_test_end(self, run: Run):
self["test"] = self.result_dict()
self.reset()
def result_dict(self):
dict = ivory.core.collections.Dict()
index, output, target = self()
return dict(index=index, output=output, target=target)
def mean(self) -> "Results":
"""Returns a reduced `Results` instance aveaged by `index`."""
results = Results()
for mode, result in self.items():
index = result.index
kwargs = {}
for key, value in list(result.items())[1:]:
if value.ndim == 1:
series = pd.Series(value, index=index)
value = series.groupby(level=0).mean()
else:
df = | pd.DataFrame(value) | pandas.DataFrame |
from numpy import NaN
import pandas as pd
import unittest
import numpy as np
'''
The series.count function that we are testing does:
Return number of non-NA/null observations in the Series.
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series.
Returns
-------
int or Series (if level specified)
Number of non-null values in the Series.
This means that count returns the number of entries in a series, excluding NaNs.
If the series has a MultiIndex (several indexes), you have to pick a level.
A level refers to one of the indexes when you have multiple indexes. Say you have two indexes for each entry,
then level 0 is the first index and level 1 is the second index. Given a MultiIndex series, count will count
how many non-null values each label in the chosen index level holds.
If you have an index [1,2,3] for values [a,b,c], it will return:
1 1
2 1
3 1
If the index for some reason is [1,1,2,3] for [a,b,c,d] it will return:
1 2
2 1
3 1
Finally if one of the values is NaN, index [1,1,2,3] for values [a,b,c,NaN], the NaN isn't counted:
1 2
2 1
3 0
##############################################
Analysis with an interface-based approach:
The parameters the function takes are a series and, optionally, a level.
So the input domain can be:
"A series and no level"
"A series with a level"
Note that you get a warning when you pass the level parameter because it will be removed in a future version.
In the future you will have to perform a groupby before you do the count, like this:
s.groupby(level=1).count()
instead of:
pandas.Series.count(s,1)
'''
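# --- Illustrative sketch (added for clarity, not part of the original tests) -------
# A minimal MultiIndex example of the behaviour described above; the data is made up
# for illustration. groupby(level=...).count() is the forward-compatible replacement
# for the deprecated level argument of Series.count.
_example = pd.Series(
    [1.0, 2.0, NaN],
    index=pd.MultiIndex.from_tuples([(1, 'a'), (1, 'b'), (2, 'a')]),
)
assert _example.count() == 2                                 # the NaN is not counted
assert _example.groupby(level=0).count().tolist() == [2, 0]  # counts per level-0 label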
class TestSeriesCount(unittest.TestCase):
    '''Tests for the pandas Series.count function. Count returns the number of elements in a series,
    but leaves out NaNs. '''
def setUp(self):
self.sequence1 = [1, 2, 3]
self.sequence2 = [1, 2, NaN]
self.sequence3 = []
self.sequence4 = [0]
self.sequence5 = [NaN]
self.sequence6 = [NaN]*500 + [6,7]
self.sequence7 = [0]*1000000 + [1]
self.sequence8 = pd.Series([1, 2, 3],index=[
np.array([1,2,3]),
np.array([15,16,16]),
])
def test_series_count_blackbox(self):
"A series and no level"
#Want to test just a small ordinary case.
counted_sequence1 = pd.Series.count(pd.Series(self.sequence1))
self.assertEqual(counted_sequence1, 3)
# Testing if NaN is dropped from the count.
counted_sequence2 = pd.Series.count(pd.Series(self.sequence2))
self.assertEqual(counted_sequence2, 2)
# Testing empty series.
counted_sequence3 = pd.Series.count(pd.Series(self.sequence3))
self.assertEqual(counted_sequence3, 0)
# Testing series with just a 0.
counted_sequence4 = pd.Series.count(pd.Series(self.sequence4))
self.assertEqual(counted_sequence4, 1)
# Testing series with just a NaN.
counted_sequence5 = pd.Series.count(pd.Series(self.sequence5))
self.assertEqual(counted_sequence5, 0)
        # Testing series with a lot of NaNs and 2 other values.
counted_sequence6 = pd.Series.count(pd.Series(self.sequence6))
self.assertEqual(counted_sequence6, 2)
        # Testing series with 1000000 zeros and a single 1.
counted_sequence7 = pd.Series.count( | pd.Series(self.sequence7) | pandas.Series |
import pytest
import numpy as np
import pandas as pd
from sklearn import clone
from sklearn.preprocessing import LabelEncoder, FunctionTransformer, OneHotEncoder, QuantileTransformer, StandardScaler
from sklearn.preprocessing import PowerTransformer, OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from data_dashboard.transformer import Transformer
from data_dashboard.transformer import WrapperFunctionTransformer
@pytest.mark.parametrize(
("test_func",),
(
(lambda x: x,),
(lambda x: 1,),
(lambda x: x**2,),
)
)
def test_wrapper_func_transformer(test_func):
"""Testing if WrapperFunctionTransformer still has functionality of an underlying FunctionTransformer."""
test_arr = np.array([1, 1, 1, 2, 3, 4, 5]).reshape(-1, 1)
tr = FunctionTransformer(func=test_func)
wrap_tr = WrapperFunctionTransformer("test", clone(tr))
expected_arr = tr.fit_transform(test_arr)
actual_arr = wrap_tr.fit_transform(test_arr)
assert np.array_equal(actual_arr, expected_arr)
assert str(wrap_tr) != str(tr)
@pytest.mark.parametrize(
("test_text",),
(
("Test1",),
("Test2",),
("",),
)
)
def test_wrapper_func_transformer_str(test_text):
"""Testing if str() function of WrapperFunctionTransformer returns text provided as an argument."""
wrap_tr = WrapperFunctionTransformer(test_text, FunctionTransformer())
assert str(wrap_tr) == test_text
def test_transformer_create_preprocessor_X(categorical_features, numerical_features):
"""Testing if X preprocessor correctly assigns steps to columns depending on their type."""
categorical_features.remove("Target")
tr = Transformer(categorical_features, numerical_features, "Categorical")
preprocessor = tr._create_preprocessor_X()
expected_steps = [("numerical", numerical_features), ("categorical", categorical_features)]
actual_steps = [(item[0], item[2]) for item in preprocessor.transformers]
for step in expected_steps:
assert step in actual_steps
assert len(actual_steps) == len(expected_steps)
@pytest.mark.parametrize(
("target_type", "expected_function"),
(
("Categorical", LabelEncoder()),
("Numerical", WrapperFunctionTransformer("test", FunctionTransformer(lambda x: x)))
)
)
def test_transformer_create_preprocessor_y(categorical_features, numerical_features, target_type, expected_function):
"""Testing if y preprocessor is created correctly."""
tr = Transformer(categorical_features, numerical_features, target_type)
preprocessor = tr._create_default_transformer_y()
assert type(preprocessor).__name__ == type(expected_function).__name__
@pytest.mark.parametrize(
("transformed_feature",),
(
("Price",),
("bool",),
("AgeGroup",)
)
)
def test_transformer_preprocessor_X_remainder(
categorical_features, numerical_features, data_classification_balanced, expected_raw_mapping,
transformed_feature
):
"""Testing if feature not declared in either categorical or numerical features passes through unchanged."""
categorical_features.remove("Target")
categorical_features = [f for f in categorical_features if f != transformed_feature]
numerical_features = [f for f in numerical_features if f != transformed_feature]
X = data_classification_balanced[0].drop(["Date"], axis=1)
if transformed_feature in expected_raw_mapping.keys():
X[transformed_feature] = X[transformed_feature].replace(expected_raw_mapping[transformed_feature])
tr = Transformer(categorical_features, numerical_features, "Numerical")
tr.fit(X)
transformed = tr.transform(X)
try:
transformed = transformed.toarray()
except AttributeError:
transformed = transformed
cols = tr.transformed_columns() + [transformed_feature]
actual_result = pd.DataFrame(transformed, columns=cols)
assert np.allclose(actual_result[transformed_feature].to_numpy(), X[transformed_feature].to_numpy(), equal_nan=True)
# checking if there is only one column with transformed_feature (no derivations)
assert sum([1 for col in cols if transformed_feature in col]) == 1
@pytest.mark.parametrize(
("transformed_features",),
(
(["Height", "Price"],),
(["Price", "AgeGroup"],),
)
)
def test_transformer_preprocessor_X_remainder_order(
categorical_features, numerical_features, data_classification_balanced, expected_raw_mapping,
transformed_features
):
"""Testing if remainder portion of ColumnTransformer returns the columns in the expected (alphabetical) order."""
categorical_features.remove("Target")
categorical_features = [f for f in categorical_features if f not in transformed_features]
numerical_features = [f for f in numerical_features if f not in transformed_features]
X = data_classification_balanced[0].drop(["Date"], axis=1)
tr = Transformer(categorical_features, numerical_features, "Numerical")
tr.fit(X)
transformed = tr.transform(X)
try:
transformed = transformed.toarray()
except AttributeError:
transformed = transformed
cols = tr.transformed_columns() + sorted(transformed_features)
actual_result = | pd.DataFrame(transformed, columns=cols) | pandas.DataFrame |
import streamlit as st
import numpy as np
import pandas as pd # TODO this should be covered by the DataManager
import xarray as xr # TODO this should be covered by the DataManager
import matplotlib.pyplot as plt # TODO this should be moved into plotting submodule
from ruins import components
def load_alldata():
weather = xr.load_dataset('data/weather.nc')
climate = xr.load_dataset('data/cordex_coast.nc')
# WARNING - bug fix for now:
# 'HadGEM2-ES' model runs are problematic and will be removed for now
# The issue is with the timestamp and requires revision of the ESGF reading routines
kys = [s for s in list(climate.keys()) if 'HadGEM2-ES' not in s] #remove all entries of HadGEM2-ES (6 entries)
climate = climate[kys]
return weather, climate
def applySDM(wdata, data, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):
'''apply structured distribution mapping to climate data and return unbiased version of dataset'''
from sdm import SDM
data_ub = data.copy()
for k in data_ub.columns:
data_col = data_ub[k].dropna()
overlapx = pd.concat(
[wdata.loc[data_col.index[0]:wdata.index[-1]], data_col.loc[data_col.index[0]:wdata.index[-1]]], axis=1)
overlapx.columns = ['obs', 'cm']
overlapx = overlapx.dropna()
try:
data_ub[k] = SDM(overlapx.obs, overlapx.cm, data_col, meth, cdf_threshold, lower_limit)
except:
data_ub[k] = data_ub[k] * np.nan
data_ub[data_ub == 0.0000000] = np.nan
data_ub = data_ub.loc[data_ub.index[0]:pd.to_datetime('2099-12-31 23:59:59')]
return data_ub
def ub_climate(cdata, wdata, ub=True):
varis = ['T', 'Tmax', 'Tmin', 'aP', 'Prec', 'RH', 'Rs', 'u2', 'EToHG']
firstitem = True
for vari in varis:
data = cdata.sel(vars=vari).to_dataframe()
data = data[data.columns[data.columns != 'vars']]
if (vari == 'T') | (vari == 'Tmax') | (vari == 'Tmin'):
meth = 'abs'
else:
meth = 'rel'
if ub:
wdatax = wdata.sel(vars=vari).to_dataframe().iloc[:, -1].dropna()
data_ub = applySDM(wdatax, data, meth=meth)
else:
data_ub = data
data_ubx = data_ub.mean(axis=1)
data_ubx.columns = [vari]
if firstitem:
data_ubc = data_ubx
firstitem = False
else:
data_ubc = pd.concat([data_ubc, data_ubx], axis=1)
data_ubc.columns = varis
return data_ubc
def get_turbine(pfi, plotit=False):
dummy = pd.read_csv(pfi)
    v_start = dummy.loc[3].astype(float).values[0]
    v_max = dummy.loc[2].astype(float).values[0]
    P_v = dummy.loc[4:28].astype(float)
P_v.index = np.arange(int(np.ceil(v_max)) + 1)[1:]
if plotit:
P_v.plot()
return [P_v, v_start, v_max]
def P_wind(wind, pfi):
[P_v, v_start, v_max] = get_turbine(pfi, False)
# stephours = (wind.index[2] - wind.index[1]).days * 24
# stephours = 1.
def interp(val):
if (val >= v_max) | (val < v_start):
return 0.
elif ~np.isnan(val):
if np.ceil(val) == np.floor(val):
return P_v.loc[int(np.floor(val))].values[0]
else:
x1 = P_v.loc[int(np.floor(val))].values[0]
x2 = P_v.loc[int(np.ceil(val))].values[0]
return ((val - np.floor(val)) * x2 + (1. - (val - np.floor(val))) * x1)
else:
return np.nan
ip_vec = np.vectorize(interp)
def get_windpower(u2):
val = np.fmax(-1. * np.cos(np.arange(0., 2 * np.pi, 0.27)) + u2, 0.)
return np.sum(ip_vec(val))
return wind.apply(get_windpower)
def cropmodel(data,crop='wheat',rcp='rcp85',name='croprunname'):
import get_climate_data.cli_crop as clc
#read co2 concentrations
if rcp == 'rcp85':
CO2 = pd.read_csv('data/RCP85_MIDYEAR_CONCENTRATIONS.DAT', skiprows=38, delim_whitespace=True, index_col=0).CO2EQ
elif rcp == 'rcp45':
CO2 = pd.read_csv('data/RCP45_MIDYEAR_CONCENTRATIONS.DAT', skiprows=38, delim_whitespace=True, index_col=0).CO2EQ
else:
CO2 = pd.read_csv('data/RCP45_MIDYEAR_CONCENTRATIONS.DAT', skiprows=38, delim_whitespace=True, index_col=0).CO2EQ * 0. + 400.
if crop == 'maize':
x = np.array([9.375e+00, 3.198e+01, 1.973e+00, 8.700e+01, 1.144e+01, 3.630e+01, 7.260e-02, 1.237e+00, 2.180e+03, 1.501e-01, 5.230e-01, 5.678e+01, 6.970e+02])
elif crop == 'meadow':
x = np.array([6.543e+00, 1.238e+01, 1.029e+00, 8.706e+01, 1.510e+01, 3.253e+01, 1.199e+00, 1.535e+03, 7.784e+03, 6.530e+03, 8.030e+03, 8.092e+03, 5.884e+03])
elif crop == 'wheat':
x = np.array([1.257e+00, 1.276e+01, 1.101e+00, 1.010e+02, 2.578e+01, 2.769e+01, 3.416e-01, 4.940e-01, 1.906e+03, 1.921e-01, 4.595e-01, 6.066e+01, 5.360e+02])
else:
print('ERROR: Crop not specified with parameters.')
yields = clc.cli_SC(data, x, CO2, nme=name)
return yields
def water_proj():
#hsim = pd.read_csv('data/hsim.csv',index_col=0)
#hsim.index = pd.to_datetime(hsim.index)
hsim_collect = pd.read_csv('data/hsim_collect.csv', index_col=0)
hsim_collect.index = | pd.to_datetime(hsim_collect.index) | pandas.to_datetime |
from tutorial.main.stepbystep.stepbysteputils.pgconnector import create_engine_ready
import pandas as pd
from suricate.explore import Explorer
from suricate.base import multiindex21column
engine = create_engine_ready()
n_questions = 100
# nrows = 50
# Xst = pd.read_sql(sql="SELECT * FROM es_scores LIMIT {}".format(nrows), con=engine).set_index(['ix_source', 'ix_target'], drop=True)[['ix', 'es_score']]
# Xsbs = pd.read_sql(sql="SELECT * FROM es_sbs LIMIT {}".format(nrows), con=engine).set_index(['ix_source', 'ix_target'], drop=True)
Xst = pd.read_sql(sql="SELECT * FROM es_scores", con=engine).set_index(['ix_source', 'ix_target'], drop=True)[['ix', 'es_score']]
Xsbs = pd.read_sql(sql="SELECT * FROM es_sbs", con=engine).set_index(['ix_source', 'ix_target'], drop=True)
# REBUILD Y_true
y_true = pd.read_sql(sql="SELECT * FROM y_true WHERE y_true.y_true = 1", con=engine).set_index(['ix_source', 'ix_target'], drop=True)
y_truetemp=Xst[['ix']]
y_truetemp['y_true']=0
y_truetemp.loc[y_true.index.intersection(Xst.index), 'y_true'] = y_true.loc[y_true.index.intersection(Xst.index), 'y_true']
y_true = y_truetemp.copy()
del y_truetemp
### y_true has now a multiindex, ix, and y_true columns
## Fit the cluster model to unsupervised (unlabelled) data
exp = Explorer(n_simple=n_questions, n_hard=n_questions)
exp.fit_cluster(X=Xst[['es_score']])
y_cluster = pd.Series(data=exp.pred_cluster(X=Xst), index=Xst.index, name='y_cluster')
X_cluster = | pd.DataFrame(y_cluster) | pandas.DataFrame |
# test bin, analyze, and plot functions
# imports
import os
from os.path import join
from os import listdir
import matplotlib.pyplot as plt
# imports
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import filter
import analyze
from correction import correct
from utils import fit, functions, bin, io, plotting, modify, plot_collections
from utils.plotting import lighten_color
# A note on SciencePlots colors
"""
Blue: #0C5DA5
Green: #00B945
Red: #FF9500
Orange: #FF2C00
Other Colors:
Light Blue: #7BC8F6
Paler Blue: #0343DF
Azure: #069AF3
Dark Green: #054907
"""
sciblue = '#0C5DA5'
scigreen = '#00B945'
scired = '#FF9500'
sciorange = '#FF2C00'
plt.style.use(['science', 'ieee', 'std-colors'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
# ----------------------------------------------------------------------------------------------------------------------
# 1. SETUP - BASE DIRECTORY
base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/'
# ----------------------------------------------------------------------------------------------------------------------
# 2. SETUP - IDPT
path_idpt = join(base_dir, 'results-04.26.22_idpt')
path_test_coords = join(path_idpt, 'coords/test-coords')
path_calib_coords = join(path_idpt, 'coords/calib-coords')
path_similarity = join(path_idpt, 'similarity')
path_results = join(path_idpt, 'results')
path_figs = join(path_idpt, 'figs')
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# 3. ANALYSIS - READ FILES
method = 'idpt'
microns_per_pixel = 0.8
# ----- 4.1 CORRECT TEST COORDS
correct_test_coords = False
if correct_test_coords:
use_idpt_zf = False
use_spct_zf = False
# ------------------------------------------------------------------------------------------------------------------
if use_idpt_zf:
"""
NOTE: This correction scheme fits a 2D spline to the in-focus particle positions and uses this to set their
z_f = 0 position.
"""
param_zf = 'zf_from_peak_int'
plot_calib_plane = False
plot_calib_spline = False
kx, ky = 2, 2
# step 1. read calibration coords
dfc, dfcpid, dfcpop, dfcstats = io.read_calib_coords(path_calib_coords, method)
# step 2. remove outliers
# 2.1 get z_in-focus mean + standard deviation
zf_c_mean = dfcpid[param_zf].mean()
zf_c_std = dfcpid[param_zf].std()
# 2.2 filter calibration coords
dfcpid = dfcpid[(dfcpid[param_zf] > zf_c_mean - zf_c_std) & (dfcpid[param_zf] < zf_c_mean + zf_c_std)]
# step 3. fit plane
dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel)
popt_c = dictc_fit_plane['popt_pixels']
if plot_calib_plane:
fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane)
plt.savefig(path_figs + '/idpt-calib-coords_fit-plane_raw.png')
plt.close()
dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value'])
dfict_fit_plane.to_excel(path_figs + '/idpt-calib-coords_fit-plane_raw.xlsx')
# step 4. FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION)
bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x,
y=dfcpid.y,
z=dfcpid[param_zf],
kx=kx,
ky=ky)
if plot_calib_spline:
fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf],
bispl_c,
cmap='RdBu',
grid_resolution=30,
view='multi')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel(r'$z_{f} \: (\mu m)$')
plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3)))
plt.savefig(path_figs + '/idpt-calib-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky))
plt.close()
# step 5. read test_coords
dft = io.read_test_coords(path_test_coords)
# step 6. drop unnecessary columns in dft
dft = dft[['frame', 'id', 'z', 'z_true', 'x', 'y', 'cm', 'error']]
# step 7. create a z_corr column by using fitted spline to correct z
dft = correct.correct_z_by_spline(dft, bispl=bispl_c, param_z='z')
dft['z_true_corr'] = dft['z_true'] - dft['z_cal_surf']
# step 8. export corrected test_coords
dft.to_excel(path_results + '/test_coords_corrected_t-calib2_c-calib1.xlsx', index=False)
elif use_spct_zf:
"""
NOTE: No correction is currently performed. The z-coords are well aligned enough in both calibration image sets
to just ignore. This is not necessarily surprising because the calibration images were acquired with the intention
of making the z-coords identical for all calibration image sets (by using the same beginning and ending tick mark
on the fine adjustment knob during image acquisition).
"""
# --------------------------------------------------------------------------------------------------------------
# SETUP - SPCT CALIBRATION IN-FOCUS COORDS
# SPCT analysis of images used for IDPT calibration
path_spct_calib_coords = join(base_dir, 'results-04.26.22_spct_calib1_test-2-3/coords/calib-coords')
path_calib_pid_defocus = join(path_spct_calib_coords, 'calib_spct_pid_defocus_stats_c-calib1_t-calib2.xlsx')
path_calib_spct_stats = join(path_spct_calib_coords, 'calib_spct_stats_c-calib1_t-calib2.xlsx')
path_calib_spct_pop = join(path_spct_calib_coords, 'calib_spct_pop_defocus_stats_c-calib1_t-calib2.xlsx')
# SPCT analysis of images used for IDPT test
path_spct_test_coords = join(base_dir, 'results-04.28.22_spct-calib2_test3/coords/calib-coords')
path_test_pid_defocus = join(path_spct_test_coords, 'calib_spct_pid_defocus_stats_c-calib2_t-calib3.xlsx')
path_test_spct_stats = join(path_spct_test_coords, 'calib_spct_stats_c-calib2_t-calib3.xlsx')
path_test_spct_pop = join(path_spct_test_coords, 'calib_spct_pop_defocus_stats_c-calib2_t-calib3.xlsx')
# --------------------------------------------------------------------------------------------------------------
# --- PART A. READ COORDS USED FOR IDPT CALIBRATION (i.e. 'calib1')
merge_spct_stats = True
param_zf = 'zf_from_peak_int'
plot_calib_plane = True
plot_test_plane = True
kx, ky = 2, 2
# step 1. merge [['x', 'y']] into spct pid defocus stats.
if merge_spct_stats:
# read SPCT calibration coords and merge ['x', 'y'] into pid_defocus_stats
dfcpid = pd.read_excel(path_calib_pid_defocus)
dfcstats = pd.read_excel(path_calib_spct_stats)
dfcpid = modify.merge_calib_pid_defocus_and_correction_coords(path_calib_coords, method, dfs=[dfcstats,
dfcpid])
else:
# read SPCT pid defocus stats that have already been merged
path_calib_pid_defocus = join(path_calib_coords, 'calib_spct_pid_defocus_stats_calib1_xy.xlsx')
dfcpid = pd.read_excel(path_calib_pid_defocus)
# step 2. remove outliers
# 2.1 get z_in-focus mean + standard deviation
zf_c_mean = dfcpid[param_zf].mean()
zf_c_std = dfcpid[param_zf].std()
# 2.2 filter calibration coords
dfcpid = dfcpid[(dfcpid[param_zf] > 34) & (dfcpid[param_zf] < zf_c_mean + zf_c_std / 2)]
dfcpid = dfcpid[dfcpid['x'] > 120]
# step 3. fit plane
dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel)
popt_c = dictc_fit_plane['popt_pixels']
if plot_calib_plane:
fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane)
plt.savefig(path_figs + '/calibration-coords_fit-plane_raw.png')
plt.close()
dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value'])
dfict_fit_plane.to_excel(path_figs + '/calibration-coords_fit-plane_raw.xlsx')
# FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION)
bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x,
y=dfcpid.y,
z=dfcpid[param_zf],
kx=kx,
ky=ky)
fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf],
bispl_c,
cmap='RdBu',
grid_resolution=30,
view='multi')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel(r'$z_{f} \: (\mu m)$')
plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3)))
plt.savefig(path_figs + '/calibration-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky))
plt.close()
# ---
# --- PART B. READ COORDS USED FOR IDPT TEST (i.e. 'calib2')
# step 1. merge [['x', 'y']] into spct pid defocus stats.
if merge_spct_stats:
# read SPCT calibration coords and merge ['x', 'y'] into pid_defocus_stats
dfcpid = pd.read_excel(path_test_pid_defocus)
dfcstats = pd.read_excel(path_test_spct_stats)
dfcpid = modify.merge_calib_pid_defocus_and_correction_coords(path_calib_coords, method, dfs=[dfcstats,
dfcpid])
else:
# read SPCT pid defocus stats that have already been merged
path_calib_pid_defocus = join(path_calib_coords, 'calib_spct_pid_defocus_stats_calib2_xy.xlsx')
dfcpid = pd.read_excel(path_calib_pid_defocus)
# step 2. remove outliers
# 2.1 get z_in-focus mean + standard deviation
zf_c_mean = dfcpid[param_zf].mean()
zf_c_std = dfcpid[param_zf].std()
# 2.2 filter calibration coords
dfcpid = dfcpid[(dfcpid[param_zf] > zf_c_mean - zf_c_std / 2) & (dfcpid[param_zf] < zf_c_mean + zf_c_std / 2)]
# step 3. fit plane
dictc_fit_plane = correct.fit_in_focus_plane(df=dfcpid, param_zf=param_zf, microns_per_pixel=microns_per_pixel)
popt_c = dictc_fit_plane['popt_pixels']
if plot_test_plane:
fig = plotting.plot_fitted_plane_and_points(df=dfcpid, dict_fit_plane=dictc_fit_plane)
plt.savefig(path_figs + '/test-coords_fit-plane_raw.png')
plt.close()
dfict_fit_plane = pd.DataFrame.from_dict(dictc_fit_plane, orient='index', columns=['value'])
dfict_fit_plane.to_excel(path_figs + '/test-coords_fit-plane_raw.xlsx')
# FIT SMOOTH 2D SPLINE AND PLOT RAW POINTS + FITTED SURFACE (NO CORRECTION)
bispl_c, rmse_c = fit.fit_3d_spline(x=dfcpid.x,
y=dfcpid.y,
z=dfcpid[param_zf],
kx=kx,
ky=ky)
fig, ax = plotting.scatter_3d_and_spline(dfcpid.x, dfcpid.y, dfcpid[param_zf],
bispl_c,
cmap='RdBu',
grid_resolution=30,
view='multi')
ax.set_xlabel('x (pixels)')
ax.set_ylabel('y (pixels)')
ax.set_zlabel(r'$z_{f} \: (\mu m)$')
plt.suptitle('fit RMSE = {}'.format(np.round(rmse_c, 3)))
plt.savefig(path_figs + '/test-coords_fit-spline_kx{}_ky{}.png'.format(kx, ky))
plt.close()
# ----------------------------------------------------------------------------------------------------------------------
# 4. PLOT TEST COORDS RMSE-Z
analyze_test_coords = False
save_plots = False
show_plots = False
if analyze_test_coords:
# read test coords
dft = io.read_test_coords(path_test_coords)
# test coords stats
mag_eff = 20.0
area_pixels = 512 ** 2
area_microns = (512 * microns_per_pixel) ** 2
i_num_rows = len(dft)
i_num_pids = len(dft.id.unique())
# ---
# --- STEP 0. drop and rename columns for simplicity
dft = dft.drop(columns=['z', 'z_true'])
dft = dft.rename(columns={'z_corr': 'z', 'z_true_corr': 'z_true'})
# ---
rmse_all_particles = False
rmse_on_off_bpe = False
rmse_compare = False
# format plots
xylim = 37.25
xyticks = [-30, -15, 0, 15, 30]
lbls = ['On', 'Border', 'Off']
markers = ['s', 'd', 'o']
if rmse_all_particles:
# --- STEP 1. CALCULATE RMSE-Z FOR ALL PARTICLES
column_to_bin = 'z_true'
bins_z = 20
round_z_to_decimal = 3
min_cm = 0.5
# 1.1 mean rmse-z
dfrmse_mean = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=1,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse_mean.to_excel(path_results + '/mean-rmse-z_bin=1_no-filters.xlsx')
# 1.2 binned rmse-z
dfrmse = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=bins_z,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse.to_excel(path_results + '/binned-rmse-z_bins={}_no-filters.xlsx'.format(bins_z))
# 1.3 groupby 'bin' rmse-z mean + std
dfrmsem, dfrmsestd = bin.bin_generic(dft,
column_to_bin='bin',
column_to_count='id',
bins=bins_z,
round_to_decimal=round_z_to_decimal,
return_groupby=True)
# 1.3 plot binned rmse-z
if save_plots or show_plots:
# close all figs
plt.close('all')
# ----------------------- BASIC RMSE-Z PLOTS
# rmse-z: microns
fig, ax = plt.subplots()
ax.plot(dfrmse.index, dfrmse.rmse_z, '-o')
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/rmse-z_microns.png')
if show_plots:
plt.show()
plt.close()
# ----------------------- Z-MEAN +/- Z-STD PLOTS
# fit line
popt, pcov = curve_fit(functions.line, dfrmse.z_true, dfrmse.z)
z_fit = np.linspace(dfrmse.z_true.min(), dfrmse.z_true.max())
rmse_fit_line = np.sqrt(np.sum((functions.line(dfrmse.z_true, *popt) - dfrmse.z)**2) / len(dfrmse.z))
print(rmse_fit_line)
# binned calibration curve with std-z errorbars (microns) + fit line
fig, ax = plt.subplots()
ax.errorbar(dfrmsem.z_true, dfrmsem.z, yerr=dfrmsestd.z, fmt='o', ms=3, elinewidth=0.5, capsize=1, color=sciblue,
label=r'$\overline{z} \pm \sigma$') #
ax.plot(z_fit, functions.line(z_fit, *popt), linestyle='--', linewidth=1.5, color='black', alpha=0.25,
label=r'$dz/dz_{true} = $' + ' {}'.format(np.round(popt[0], 3)))
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$z \: (\mu m)$')
ax.set_ylim([-xylim, xylim])
ax.set_yticks(ticks=xyticks, labels=xyticks)
ax.legend(loc='lower right', handletextpad=0.25, borderaxespad=0.3)
plt.tight_layout()
if save_plots:
plt.savefig(path_figs +
'/calibration_curve_z+std-errobars_fit_line_a{}_b{}_slope-label-blk.png'.format(
np.round(popt[0],
3),
np.round(popt[1],
3))
)
if show_plots:
plt.show()
plt.close()
if rmse_on_off_bpe:
# --- STEP 0. SPLIT DATAFRAME INTO (1) OFF BPE and (2) OFF BPE.
column_to_bin = 'x'
bins_x = [145, 175, 205]
round_x_to_decimal = 0
dfbx = bin.bin_by_list(dft,
column_to_bin=column_to_bin,
bins=bins_x,
round_to_decimal=round_x_to_decimal,
)
df_on = dfbx[dfbx['bin'] == bins_x[0]]
df_edge = dfbx[dfbx['bin'] == bins_x[1]]
df_off = dfbx[dfbx['bin'] == bins_x[2]]
# --- plotting
# --- STEP 1. PLOT CALIBRATION CURVE (Z VS. Z_TRUE) FOR EACH DATAFRAME (ON, EDGE, OFF)
ss = 1
fig, ax = plt.subplots()
ax.scatter(df_off.z_true, df_off.z, s=ss, marker=markers[2], color=sciblue, label=lbls[2])
ax.scatter(df_on.z_true, df_on.z, s=ss, marker=markers[0], color=sciorange, label=lbls[0])
ax.scatter(df_edge.z_true, df_edge.z, s=ss, marker=markers[1], color=scired, label=lbls[1])
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$z \: (\mu m)$')
ax.set_ylim([-xylim, xylim])
ax.set_yticks(ticks=xyticks, labels=xyticks)
ax.legend(loc='lower right', markerscale=2.5)
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/on-edge-off-bpe_calibration_curve.png')
if show_plots:
plt.show()
plt.close()
# --- STEP 2. FOR EACH DATAFRAME (ON, EDGE, OFF), COMPUTE RMSE-Z AND PLOT
for lbl, dft in zip(lbls, [df_on, df_edge, df_off]):
# --- STEP 1. CALCULATE RMSE-Z FOR ALL PARTICLES
column_to_bin = 'z_true'
bins_z = 20
round_z_to_decimal = 3
min_cm = 0.5
# 1.1 mean rmse-z
dfrmse_mean = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=1,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse_mean.to_excel(path_results + '/{}_mean-rmse-z_bin=1_no-filters.xlsx'.format(lbl))
# 1.2 binned rmse-z
dfrmse = bin.bin_local_rmse_z(dft,
column_to_bin=column_to_bin,
bins=bins_z,
min_cm=min_cm,
z_range=None,
round_to_decimal=round_z_to_decimal,
df_ground_truth=None,
dropna=True,
error_column='error',
)
dfrmse.to_excel(path_results + '/{}_binned-rmse-z_bins={}_no-filters.xlsx'.format(lbl, bins_z))
# 1.3 groupby 'bin' rmse-z mean + std
dfrmsem, dfrmsestd = bin.bin_generic(dft,
column_to_bin='bin',
column_to_count='id',
bins=bins_z,
round_to_decimal=round_z_to_decimal,
return_groupby=True)
# 1.3 plot binned rmse-z
if save_plots or show_plots:
# close all figs
plt.close('all')
# ----------------------- BASIC RMSE-Z PLOTS
# rmse-z: microns
fig, ax = plt.subplots()
ax.plot(dfrmse.index, dfrmse.rmse_z, '-o')
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/{}_rmse-z_microns.png'.format(lbl))
if show_plots:
plt.show()
plt.close()
# ----------------------- Z-MEAN +/- Z-STD PLOTS
# fit line
popt, pcov = curve_fit(functions.line, dfrmse.z_true, dfrmse.z)
z_fit = np.linspace(dfrmse.z_true.min(), dfrmse.z_true.max())
rmse_fit_line = np.sqrt(np.sum((functions.line(dfrmse.z_true, *popt) - dfrmse.z) ** 2) / len(dfrmse.z))
print(rmse_fit_line)
# binned calibration curve with std-z errorbars (microns) + fit line
fig, ax = plt.subplots()
ax.errorbar(dfrmsem.z_true, dfrmsem.z, yerr=dfrmsestd.z, fmt='o', ms=3, elinewidth=0.5, capsize=1,
color=sciblue,
label=r'$\overline{z} \pm \sigma$') #
ax.plot(z_fit, functions.line(z_fit, *popt), linestyle='--', linewidth=1.5, color='black', alpha=0.25,
label=r'$dz/dz_{true} = $' + ' {}'.format(np.round(popt[0], 3)))
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$z \: (\mu m)$')
ax.set_ylim([-xylim, xylim])
ax.set_yticks(ticks=xyticks, labels=xyticks)
ax.legend(loc='lower right', handletextpad=0.25, borderaxespad=0.3)
plt.tight_layout()
if save_plots:
plt.savefig(path_figs +
'/{}_calibration_curve_z+std-errobars_fit_line_a{}_b{}_slope-label-blk.png'.format(
lbl,
np.round(popt[0],
3),
np.round(popt[1],
3))
)
if show_plots:
plt.show()
plt.close()
if rmse_compare:
# 1. read binned rmse-z dataframes from Excel
path_rmse_compare = join(path_results, 'on-edge-off-bpe')
df1 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[0])))
df2 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[1])))
df3 = pd.read_excel(join(path_rmse_compare, '{}_binned-rmse-z_bins=20_no-filters.xlsx'.format(lbls[2])))
# 1.3 plot binned rmse-z
if save_plots or show_plots:
ms = 4
# ----------------------- BASIC RMSE-Z PLOTS
# rmse-z: microns
fig, ax = plt.subplots()
ax.plot(df3.bin, df3.rmse_z, '-o', ms=ms, label=lbls[2], color=sciblue)
ax.plot(df2.bin, df2.rmse_z, '-o', ms=ms, label=lbls[1], color=scired)
ax.plot(df1.bin, df1.rmse_z, '-o', ms=ms, label=lbls[0], color=sciorange)
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
ax.legend()
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns.png')
if show_plots:
plt.show()
plt.close()
# rmse-z (microns) + c_m
darken_clr = 1.0
alpha_clr = 1.0
fig, [axr, ax] = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': [1, 2]})
axr.plot(df3.bin, df3.cm, '-', ms=ms-2, marker=markers[2], color=sciblue)
axr.plot(df2.bin, df2.cm, '-', ms=ms-2, marker=markers[1], color=scired)
axr.plot(df1.bin, df1.cm, '-', ms=ms-2, marker=markers[0], color=sciorange)
axr.set_ylabel(r'$c_{m}$')
ax.plot(df3.bin, df3.rmse_z, '-', ms=ms-0.75, marker=markers[2], color=sciblue, label=lbls[2])
ax.plot(df2.bin, df2.rmse_z, '-', ms=ms-0.75, marker=markers[1], color=scired, label=lbls[1])
ax.plot(df1.bin, df1.rmse_z, '-', ms=ms-0.75, marker=markers[0], color=sciorange, label=lbls[0])
ax.set_xlabel(r'$z_{true} \: (\mu m)$')
ax.set_xlim([-xylim, xylim])
ax.set_xticks(ticks=xyticks, labels=xyticks)
ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
ax.legend()
plt.tight_layout()
if save_plots:
plt.savefig(path_figs + '/compare-on-edge-off-bpe_rmse-z_microns_cm.png')
if show_plots:
plt.show()
plt.close()
# ----------------------------------------------------------------------------------------------------------------------
# 5. IDPT VS. SPCT - COMPARE NUMBER OF PARTICLES PER Z
compare_idpt_spct = False
save_plots = False
show_plots = False
if compare_idpt_spct:
# --- 1. IDPT
# read IDPT test coords
dft = io.read_test_coords(path_test_coords)
# test coords stats
mag_eff = 20.0
area_pixels = 512 ** 2
area_microns = (512 * microns_per_pixel) ** 2
i_num_rows = len(dft)
i_num_pids = len(dft.id.unique())
dft = dft.drop(columns=['z', 'z_true'])
dft = dft.rename(columns={'z_corr': 'z', 'z_true_corr': 'z_true'})
# --- 2. SPCT
# 2.1 read SPCT off-bpe test coords
dfs_off = pd.read_excel('/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_calib1_test-2-3/coords/test-coords/test_coords_t-calib2_c-calib1.xlsx')
dfs_on = | pd.read_excel('/Users/mackenzie/Desktop/gdpyt-characterization/experiments/11.02.21-BPE_Pressure_Deflection_20X/analyses/results-04.26.22_spct_stack-id-on-bpe/testcalib2_calcalib1/test_coords_t_20X_ccalib1_tcalib2_c_20X_tcalib2_ccalib1_2022-04-26 20:45:34.334931.xlsx') | pandas.read_excel |
from collections import Counter
import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist
class KNN:
def __init__(self, k: int):
self.k = k # number of nearest neighbors to be found
self.features = pd.DataFrame([]) # feature matrix
self.labels = pd.Series([]) # label vector
self.index = pd.Index([]) # indices of all the rows
self.target = "" # name of the label
self.columns = pd.Index([]) # indices of all the columns
self.num_cols = pd.Index([]) # indices of numerical columns
self.cat_cols = pd.Index([]) # indices of categorical columns
def train(self, X: pd.DataFrame, y: pd.Series):
# Sanity check
assert all(X.index == y.index), "Indices mismatch"
# Drop rows with missing data
Xy = pd.concat([X, y], axis=1).dropna(axis=0, how='any')
_X, _y = Xy[X.columns], Xy[y.name]
# Initialization
self.features = _X
self.labels = _y
self.index = _X.index
self.target = _y.name
self.columns = _X.columns
self.num_cols = _X.select_dtypes(include='number').columns
self.cat_cols = _X.select_dtypes(exclude='number').columns
self.cat_cols = self.columns.drop(self.num_cols)
def predict(self, x: pd.Series, return_neighbors: bool = False):
r"""Predict the label of a single instance
Args:
x: pd.Series
return_neighbors: bool
If set to true, return the k nearest neighbors of
the given instance, along with the the label
Returns:
label_pred (return_neighbors=False)
label_pred, neighbors (return_neighbors=True)
"""
# Compute all pairwise distances
dists = self.distance(x)
# Select the k nearest neighbors
idx = np.argpartition(dists, self.k)[:self.k]
idx_neighbors = dists.iloc[idx].index
features_k = self.features.loc[idx_neighbors]
labels_k = self.labels.loc[idx_neighbors]
# Majority vote
label_pred = Counter(labels_k).most_common(1)[0][0]
# Return class label and/or neighbors
if return_neighbors:
neighbors = pd.concat([features_k, labels_k], axis=1)
return label_pred, neighbors
else:
return label_pred
def impute(self, X: pd.DataFrame) -> pd.DataFrame:
r"""Impute the missing value of the given dataset
Args:
X: pd.DataFrame
Returns:
X_imputed
"""
# Sanity check
assert all(X.columns == self.columns), "Entries mismatch"
# Combine X and self.features into the entire dataset
E = pd.concat([self.features, X])
# Impute each row of X
X_imputed = []
for index, x in X.iterrows():
# Find k nearest neighbors
_, neighbors = self.predict(x, return_neighbors=True)
            neighbors = neighbors.drop(columns=self.target)  # drop the label column so only features remain
neighbors_num = neighbors[self.num_cols]
neighbors_cat = neighbors[self.cat_cols]
# Impute values
impute_num = neighbors_num.mean()
impute_cat = neighbors_cat.mode()
# Breaking ties for categorical values
if len(impute_cat) > 1: # at least one entry includes ties
ties_idx = impute_cat.columns[impute_cat.count() > 1]
ties = impute_cat[ties_idx]
# Break ties by comparing occurrences in the entire dataset
wins = {}
for tie in ties.iteritems():
feature, cat = tie
# Filter occurrences of interest
cat_counts = E[feature].value_counts()[cat.dropna()]
# Select the category with the highest frequency
cat_win = cat_counts.sort_values(ascending=False).index[0]
# Update impute_cat
wins[feature] = cat_win
# Update and clean up impute_cat
for feature, cat_win in wins.items():
impute_cat.loc[0, feature] = cat_win
# Combine impute values
impute_cat = impute_cat.loc[0] # squeeze impute_cat into pd.Series
impute_val = pd.concat([impute_num, impute_cat])
# Fill missing values
_nan_cols = self.columns[x.isna()]
x_imputed = x.copy()
x_imputed[_nan_cols] = impute_val[_nan_cols]
X_imputed.append(x_imputed)
# Clean up X_imputed
X_imputed = pd.DataFrame(X_imputed, index=X.index)
return X_imputed
def distance(self, x: pd.Series) -> pd.Series:
r"""Pairwise distance between the given instance and all the instances in the model
Args:
x: pd.Series
Returns:
dist
"""
# Sanity check
assert all(x.index == self.columns), "Entries mismatch"
# Drop columns with missing values
_nan_cols = self.columns[x.isna()]
_num_cols = self.num_cols.drop(_nan_cols, errors='ignore')
_cat_cols = self.cat_cols.drop(_nan_cols, errors='ignore')
# Split numerical (continuous) and categorical parts
x_num = x[_num_cols].to_numpy().reshape(1, -1)
features_num = self.features[_num_cols].to_numpy()
x_cat = x[_cat_cols]
features_cat = self.features[_cat_cols]
# Compute the distance
dist_num = cdist(x_num, features_num).squeeze(0)
dist_cat = np.sum(10 * (x_cat != features_cat), axis=1)
dist = | pd.Series(dist_num + dist_cat, index=self.index) | pandas.Series |
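# Hypothetical usage sketch for the KNN imputer above (names and data are invented for
# illustration; they are not part of the original module):
#
#     knn = KNN(k=5)
#     knn.train(X_train, y_train)       # rows containing NaNs are dropped for training
#     X_filled = knn.impute(X_missing)  # numeric gaps -> neighbour mean, categorical -> mode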
import os
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
from pandas.tseries.offsets import Day, Nano
def test_qcut():
arr = np.random.randn(1000)
    # We store the bins as an Index that has been rounded,
    # so comparisons are a bit tricky.
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles():
arr = np.random.randn(100)
factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same():
with pytest.raises(ValueError, match="edges.*unique"):
qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[
Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9),
]
)
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas():
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index():
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_qcut_binning_issues(datapath):
# see gh-1978, gh-1979
cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
arr = np.loadtxt(cut_file)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(
zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
):
assert sp < sn
assert ep < en
assert ep <= sn
def test_qcut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(ser, [0, 0.333, 0.666, 1])
exp_levels = np.array(
[Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
)
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("labels", ["foo", 1, True])
def test_qcut_incorrect_labels(labels):
# GH 13318
values = range(5)
msg = "Bin labels must either be False, None or passed in as a list-like argument"
with pytest.raises(ValueError, match=msg):
qcut(values, 4, labels=labels)
@pytest.mark.parametrize("labels", [["a", "b", "c"], list(range(3))])
def test_qcut_wrong_length_labels(labels):
# GH 13318
values = range(10)
msg = "Bin labels must be one fewer than the number of bin edges"
with pytest.raises(ValueError, match=msg):
qcut(values, 4, labels=labels)
@pytest.mark.parametrize(
"labels, expected",
[
(["a", "b", "c"], Categorical(["a", "b", "c"], ordered=True)),
(list(range(3)), Categorical([0, 1, 2], ordered=True)),
],
)
def test_qcut_list_like_labels(labels, expected):
# GH 13318
values = range(3)
result = qcut(values, 3, labels=labels)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,msg",
[
({"duplicates": "drop"}, None),
({}, "Bin edges must be unique"),
({"duplicates": "raise"}, "Bin edges must be unique"),
({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
],
)
def test_qcut_duplicates_bin(kwargs, msg):
# see gh-7751
values = [0, 0, 0, 0, 1, 2, 3]
if msg is not None:
with pytest.raises(ValueError, match=msg):
qcut(values, 3, **kwargs)
else:
result = qcut(values, 3, **kwargs)
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
tm.assert_index_equal(result.categories, expected)
@pytest.mark.parametrize(
"data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
# see gh-15431
ser = Series([data] * length)
result = qcut(ser, 1, labels=labels)
if labels is None:
intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
expected = Series(intervals).astype(CDT(ordered=True))
else:
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser",
[
Series(DatetimeIndex(["20180101", NaT, "20180103"])),
Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
],
ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
# see gh-19768
intervals = IntervalIndex.from_tuples(
[(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
)
expected = Series(Categorical(intervals, ordered=True))
result = qcut(ser, 2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
# see gh-19872
tz = "US/Eastern"
ser = Series(date_range("20130101", periods=3, tz=tz))
result = qcut(ser, bins)
expected = Series(
IntervalIndex(
[
Interval(
Timestamp("2012-12-31 23:59:59.999999999", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz),
),
]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"arg,expected_bins",
[
[
timedelta_range("1day", periods=3),
TimedeltaIndex(["1 days", "2 days", "3 days"]),
],
[
date_range("20180101", periods=3),
DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]),
],
],
)
def test_date_like_qcut_bins(arg, expected_bins):
# see gh-19891
ser = Series(arg)
result, result_bins = qcut(ser, 2, retbins=True)
tm.assert_index_equal(result_bins, expected_bins)
@pytest.mark.parametrize("bins", [6, 7])
@pytest.mark.parametrize(
"box, compare",
[
(Series, tm.assert_series_equal),
(np.array, tm.assert_categorical_equal),
(list, tm.assert_equal),
],
)
def test_qcut_bool_coercion_to_int(bins, box, compare):
# issue 20303
data_expected = box([0, 1, 1, 0, 1] * 10)
data_result = box([False, True, True, False, True] * 10)
expected = | qcut(data_expected, bins, duplicates="drop") | pandas.qcut |
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
def _clean_soup(soup):
"""Clean up html to get rid of tricky comments."""
return BeautifulSoup(str(soup).replace('<!--', '').replace('-->', ''), "lxml")
def _get_urls(row):
"""Get urls for all cells in table row."""
all = row.findAll('th') + row.findAll('td')
return [t.find('a')['href'] if t.find('a') else np.nan for t in all]
def list_tables(soup):
"""List tables present in BSoup object that we know how to fetch."""
soup = _clean_soup(soup)
return [t.attrs['id'] for t in soup.findAll('table') if 'id' in t.attrs]
def _convert_dtypes(df):
"""Convert dtypes of df, when possible."""
for col in df.columns:
try:
df[col] = [int(i) if i != '' and i is not None else None for i in df[col]]
except ValueError:
try:
df[col] = df[col].replace('', np.nan).astype(float)
except ValueError:
pass
return df
def get_table_from_soup(table_soup, get_url=False, include_tfoot=False):
"""Extract table from html to pd.DataFrame."""
th_soups = table_soup.find('thead').findAll('tr')
columns = [t['data-stat'] for t in th_soups[-1].findAll('th')]
row_soups = table_soup.findAll('tr', {'class': 'full_table'})
if not row_soups:
row_soups = table_soup.find('tbody').findAll('tr')
if include_tfoot:
row_soups.extend(table_soup.find('tfoot').findAll('tr'))
rows = [[r.find('th').text] + [t.text for t in r.findAll('td')] for r in row_soups]
df = | pd.DataFrame(rows, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from typing import Dict, Iterator, Tuple
from ..models import unit
from ..models.column import Column
from ..models.table import Table
from ..generators.label_generator import LabelGenerator
from ..generators.unit_generator import UnitGenerator
from ..generators.column_generator import ColumnGenerator
from ..generators.table_generator import TableGenerator
from ..generators.dataset_generator import DatasetGenerator
from core.generators.mashup_dataset_generator import MashupDatasetGenerator
## Many-to-Many - Same input / output
class ManyToManySameInputAndOutputVocabOrchestrator(object):
def __init__(self, vocab: str, unit_lookup: Dict[int, str]):
assert len(vocab) > 0
self.vocab = [ char for char in vocab ]
        system_vocab = ['\n', ' ', ] + [ char for char in '0123456789.\\/' ]
for v in system_vocab:
self.vocab.append(v)
## want to make <eol> = 0
self.vocab = ['<eol>'] + list(sorted(set(self.vocab)))
self.index_to_char = dict([ (i, c) for i, c in enumerate(self.vocab) ])
self.char_to_index = dict([ (c, i) for i, c in enumerate(self.vocab) ])
self.unit_lookup = unit_lookup
label_generator_index_to_char = self.index_to_char.copy()
for key in (['<eol>'] + system_vocab):
del label_generator_index_to_char[self.char_to_index[key]]
self.generators = {
'label': LabelGenerator(label_generator_index_to_char),
'unit': UnitGenerator(self.unit_lookup),
}
self.generators['column'] = ColumnGenerator(
self.generators['label'],
self.generators['unit']
)
super().__init__()
def create_table_generator(self, table: Table):
return TableGenerator(table, self.generators['column'])
def get_vocab_size(self):
return len(self.vocab)
def build_dataset(self, options: Iterator[dict]):
## individual option: { 'table', 'n_instances' }
return MashupDatasetGenerator().get_data(options)
def translate_to_string_array(self, input_array: Iterator[int]) -> Iterator[str]:
return [ self.index_to_char[i] for i in input_array ]
def translate_to_integer_array(self, dataset: pd.DataFrame, table_lookup: Dict[str, Table]) -> pd.DataFrame:
def translate(row: str):
return [ self.char_to_index[char] for char in row ]
## 1. calculate the worst possible table,
## - we want to fit a dataset, with different possible tables, into one where the input and output are the exact same.
table_height_width_lookup = {}
for key in table_lookup.keys():
table = table_lookup[key]
calculated_table_width, calculated_table_height = table.get_max_size()
table_height_width_lookup[key] = (calculated_table_width, calculated_table_height, calculated_table_width * calculated_table_height)
translated_dataset = []
for index, row in dataset.iterrows():
key = row['table_id']
table_width, table_height, target_total_chars = table_height_width_lookup[key]
train = []
table = table_lookup[key]
table_splits = row['table'].strip('\n').split('\n')
for i, table_row in enumerate(table_splits):
translated_row = translate(table_row)
if i != 0 and i < len(table_splits) - 1:
translated_row = [self.char_to_index['\n']] + translated_row
if len(translated_row) < table_width:
pad = table_width - len(translated_row)
translated_row = translated_row + [ self.char_to_index[' '] for _ in range(pad)]
assert len(translated_row) == table_width, f'{len(translated_row)} <> {table_width}, {index}'
train.append(translated_row)
while np.array(train).shape[0] < table_height:
translated_row = [ self.char_to_index['<eol>'] for _ in range(table_width)]
train.append(translated_row)
shape = np.array(train).shape
assert shape[0] == table_height, f'{shape[0]} <> {table_height}, {index}'
target = translate(row['target'].strip('\n'))
if len(target) < target_total_chars:
pad = target_total_chars - len(target)
target = target + [ self.char_to_index['<eol>'] for _ in range(pad)]
assert len(target) == target_total_chars, f'{len(target)} <> {target_total_chars}, {index}'
translated_dataset.append(
(np.array(train).flatten().tolist(), target, len(table.columns))
)
        df = pd.DataFrame(translated_dataset)
        return df
import numpy as np
import pandas as pd
from scipy import signal, stats
from sklearn.linear_model import LinearRegression
from obspy.signal.trigger import classic_sta_lta
def sequence_generator(data, xcol="acoustic_data", ycol="time_to_failure", size=150000):
"""Generator that extracts segments of the signal from the data.
Parameters
----------
data: pd.DataFrame,
The data with all observations. Must have two columns: one with the measurement
of the signal and one with the target, i.e., time to the next earthquake.
xcol: str, optional (default: "acoustic_data"),
        The column referring to the signal data.
ycol: str, optional (default: "time_to_failure"),
The column referring to the target value.
size: int, optional (default: 150,000),
The number of observations to include in a single sequence. Should be left at
its default to generate sequences similar to the test data.
Returns
-------
A generator object that generates tuples like:
(sequence of 'size' observations of xcol, last corresponding value of ycol).
"""
while True:
indices = np.random.randint(0, len(data) - size - 1, 10000)
for idx in indices:
y = data[ycol].iloc[idx + size - 1]
x = data[idx:(idx + size)][xcol].values
yield x, y
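# Hedged usage sketch (synthetic data invented for illustration): draw a single
# (signal window, time-to-failure) pair from the generator defined above.
_fake_quake = pd.DataFrame({
    "acoustic_data": np.random.randn(1_000),
    "time_to_failure": np.linspace(16.0, 0.0, 1_000),
})
_gen = sequence_generator(_fake_quake, size=100)
_x, _y = next(_gen)  # _x holds 100 signal values, _y the target at the window's end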
class FeatureComputer():
"""Class that computes features over a given array of observations.
This is done in a class so that it can be initialized once and can then be used throughout the
train-validate-test sequence without specifying all the parameters everytime.
Parameters
----------
minimum, maximum, mean, median, std: boolean, optional (default: True),
Whether to include the corresponding feature.
quantiles: list of floats,
The quantiles to compute.
abs_min, abs_max, abs_mean, abs_median, abs_std: boolean, optional (default: True),
The same features as above, but calculated over the absolute signal.
abs_quantiles: list of floats,
The quantiles to compute over the absolute signal.
mean_abs_delta, mean_rel_delta: boolean, optional (default: True),
        Whether to compute the average change per observation. For 'mean_rel_delta' the change
        is divided by the value of the previous observation, giving a relative change.
max_to_min: boolean, optional (default: True),
Whether to compute the rate between the absolute maximum and the absolute minimum.
count_abs_big: list of floats,
The thresholds for which it is counted how many times the absolute signal
exceeds the threshold.
abs_trend: boolean, optional (default: True),
Whether to calculate the linear trend of the time series.
mad: boolean, optional (default: True),
Whether to calculate the mean absolute deviation of the time series.
skew: boolean, optional (default: True),
Whether to calculate the skewness of the time series.
abs_skew: boolean, optional (default: True),
Whether to calculate the skewness of the absolute values of the time series.
    kurtosis: boolean, optional (default: True),
        Whether to calculate the kurtosis of the time series. The kurtosis
        measures the tailedness of a time series.
    abs_kurtosis: boolean, optional (default: True),
        Whether to calculate the kurtosis of the absolute values of the time series.
    hilbert: boolean, optional (default: True),
        Whether to calculate the mean of the absolute Hilbert transform of the signal.
    hann: boolean, optional (default: True),
        Whether to calculate the mean of the signal smoothed with a Hann window.
    stalta: list of (float, float) tuples,
        The (short window, long window) pairs for which the mean STA/LTA ratio
        (short-term average over long-term average) is calculated over the whole segment.
    stalta_window: list of (float, float) tuples,
        Same as 'stalta', but calculated per sub-window.
    exp_mov_ave: list of floats,
        The spans for which the mean of the exponential moving average is calculated
        over the whole segment.
    exp_mov_ave_window: list of floats,
        Same as 'exp_mov_ave', but calculated per sub-window.
window: int or None, optional (default: None),
If given, calculates the features over subsequences of size 'window'.
array_length: int, optional (default: 150000),
The array length to expect. Only needed if window is not None.
Returns
-------
result: np.array,
The specified features of the given array.
Notes
-----
In order to see which value in the result refers to which feature, see 'self.feature_names'.
"""
feats = ["minimum", "maximum", "mean", "median", "std", "abs_min", "abs_max", "abs_mean",
"abs_median", "abs_std", "mean_abs_delta", "mean_rel_delta", "max_to_min", "abs_trend",
"mad", "skew", "abs_skew", "kurtosis", "abs_kurtosis", "hilbert", "hann"]
def __init__(self, minimum=True, maximum=True, mean=True, median=True, std=True, quantiles=None,
abs_min=True, abs_max=True, abs_mean=True, abs_median=True, abs_std=True, abs_quantiles=None,
mean_abs_delta=True, mean_rel_delta=True, max_to_min=True, count_abs_big=None,
abs_trend=True, mad=True, skew=True, abs_skew=True, kurtosis=True, abs_kurtosis=True,
hilbert=True, hann=True, stalta=None, stalta_window=None, exp_mov_ave=None, exp_mov_ave_window=None,
window=None, array_length=150000):
self.minimum = minimum
self.maximum = maximum
self.mean = mean
self.median = median
self.std = std
self.abs_min = abs_min
self.abs_max = abs_max
self.abs_mean = abs_mean
self.abs_median = abs_median
self.abs_std = abs_std
self.mean_abs_delta = mean_abs_delta
self.mean_rel_delta = mean_rel_delta
self.max_to_min = max_to_min
self.abs_trend = abs_trend
self.mad = mad
self.skew = skew
self.abs_skew = abs_skew
self.kurtosis = kurtosis
self.abs_kurtosis = abs_kurtosis
self.hilbert = hilbert
self.hann = hann
if quantiles is None:
self.quantiles = []
else:
self.quantiles = quantiles
if abs_quantiles is None:
self.abs_quantiles = []
else:
self.abs_quantiles = abs_quantiles
self.window = window
if count_abs_big is None:
self.count_abs_big = []
else:
self.count_abs_big = count_abs_big
if stalta is None:
self.stalta = []
else:
self.stalta = stalta
if stalta_window is None:
self.stalta_window = []
else:
self.stalta_window = stalta_window
if exp_mov_ave is None:
self.exp_mov_ave = []
else:
self.exp_mov_ave = exp_mov_ave
if exp_mov_ave_window is None:
self.exp_mov_ave_window = []
else:
self.exp_mov_ave_window = exp_mov_ave_window
if self.window is not None:
self.indicators = np.array(([np.ones(window)*i for i in range(int(np.ceil(array_length/window)))]),
dtype=int).flatten()
self.indicators = self.indicators[:array_length]
assert len(self.indicators) == array_length, "Lengths do not match"
self.feature_names = self._infer_names()
self.n_features = len(self.feature_names)
def _infer_names(self):
"""Infer the names of the features that will be calculated."""
quantile_names = [str(q) + "-quantile" for q in self.quantiles]
abs_quantile_names = [str(q) + "-abs_quantile" for q in self.abs_quantiles]
count_abs_big_names = [str(q) + "-count_big" for q in self.count_abs_big]
stalta_names = ["all_stalta-" + str(q[0]) + "-" + str(q[1]) for q in self.stalta]
exp_mov_ave_names = ["all_exp_mov_ave-" + str(q) for q in self.exp_mov_ave]
if self.window is not None:
stalta_names_window = ["stalta-" + str(q[0]) + "-" + str(q[1]) for q in self.stalta_window]
exp_mov_ave_names_window = ["exp_mov_ave-" + str(q) for q in self.exp_mov_ave_window]
names = np.array(self.feats)[[self.minimum, self.maximum, self.mean, self.median, self.std,
self.abs_min, self.abs_max, self.abs_mean, self.abs_median,
self.abs_std, self.mean_abs_delta, self.mean_rel_delta,
self.max_to_min, self.abs_trend, self.mad, self.skew, self.abs_skew,
self.kurtosis, self.abs_kurtosis, self.hilbert, self.hann]]
names = names.tolist() + quantile_names + abs_quantile_names + count_abs_big_names
if self.window is not None:
all_names = [str(i) + "_" + name for i in np.unique(self.indicators) for name in names + stalta_names_window + exp_mov_ave_names_window]
self.result_template_window = np.zeros(int(len(all_names) / len(np.unique(self.indicators))))
all_names = all_names + ["all_" + name for name in names] + stalta_names + exp_mov_ave_names
self.result_template = np.zeros(len(names + stalta_names + exp_mov_ave_names))
return all_names
else:
all_names = names + stalta_names + exp_mov_ave_names
self.result_template = np.zeros(len(all_names))
return all_names
def compute(self, arr):
if self.window is None:
return self._compute_features(arr)
else:
df = pd.DataFrame({"arr": arr, "indicator": self.indicators})
values = (df.groupby("indicator")["arr"]
.apply(lambda x: self._compute_features(x, window=True))
.apply(pd.Series)
.values
.flatten())
# include values over the whole segment
overall_values = self._compute_features(arr)
return np.concatenate([values, overall_values])
def _compute_features(self, arr, window=False):
if window:
result = np.zeros_like(self.result_template_window)
else:
result = np.zeros_like(self.result_template)
i = 0
if self.minimum:
result[i] = np.min(arr)
i += 1
if self.maximum:
result[i] = np.max(arr)
i += 1
if self.mean:
result[i] = np.mean(arr)
i += 1
if self.median:
result[i] = np.median(arr)
i += 1
if self.std:
result[i] = np.std(arr)
i += 1
if self.abs_min:
result[i] = np.min(np.abs(arr))
i += 1
if self.abs_max:
result[i] = np.max(np.abs(arr))
i += 1
if self.abs_mean:
result[i] = np.mean(np.abs(arr))
i += 1
if self.abs_median:
result[i] = np.median(np.abs(arr))
i += 1
if self.abs_std:
result[i] = np.std(np.abs(arr))
i += 1
if self.mean_abs_delta:
result[i] = np.mean(np.diff(arr))
i += 1
if self.mean_rel_delta:
result[i] = np.mean(np.nonzero((np.diff(arr) / arr[:-1]))[0])
i += 1
if self.max_to_min:
result[i] = np.max(arr) / np.abs(np.min(arr))
i += 1
if self.abs_trend:
idx = np.array(range(len(arr)))
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), np.abs(arr))
result[i] = lr.coef_[0]
i += 1
if self.mad: # mean absolute deviation
result[i] = np.mean(np.abs(arr - np.mean(arr)))
i += 1
if self.skew:
result[i] = stats.skew(arr)
i += 1
if self.abs_skew:
result[i] = stats.skew(np.abs(arr))
i += 1
if self.kurtosis: # measure of tailedness
result[i] = stats.kurtosis(arr)
i += 1
if self.abs_kurtosis: # measure of tailedness
result[i] = stats.kurtosis(np.abs(arr))
i += 1
        if self.hilbert:  # abs mean of the Hilbert-transformed signal
result[i] = np.mean(np.abs(signal.hilbert(arr)))
i += 1
if self.hann: # mean in hann window
result[i] = np.mean(signal.convolve(arr, signal.hann(150), mode='same') / np.sum(signal.hann(150)))
i += 1
if self.quantiles is not None:
result[i:i + len(self.quantiles)] = np.quantile(arr, q=self.quantiles)
i += len(self.quantiles)
if self.abs_quantiles is not None:
result[i:i + len(self.abs_quantiles)] = np.quantile(np.abs(arr), q=self.abs_quantiles)
i += len(self.abs_quantiles)
if self.count_abs_big is not None:
result[i: i + len(self.count_abs_big)] = np.array([len(arr[np.abs(arr) > q]) for q in self.count_abs_big])
i += len(self.count_abs_big)
if self.stalta:
if window:
result[i:i + len(self.stalta_window)] = np.array(
[np.mean(classic_sta_lta(arr, q[0], q[1])) for q in self.stalta_window])
i += len(self.stalta_window)
else:
result[i:i + len(self.stalta)] = np.array(
[np.mean(classic_sta_lta(arr, q[0], q[1])) for q in self.stalta])
i += len(self.stalta)
if self.exp_mov_ave:
if window:
result[i:i + len(self.exp_mov_ave_window)] = np.array(
[np.mean(pd.Series.ewm(pd.Series(arr), span=q).mean()) for q in self.exp_mov_ave_window])
i += len(self.exp_mov_ave_window)
else:
result[i:i + len(self.exp_mov_ave)] = np.array(
                    [np.mean(pd.Series.ewm(pd.Series(arr), span=q).mean()) for q in self.exp_mov_ave])
                i += len(self.exp_mov_ave)
        return result
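# Hedged usage sketch (random segment invented for illustration): compute the default
# whole-segment features for one 150,000-sample window and pair them with their names.
_fc = FeatureComputer(quantiles=[0.05, 0.95])
_segment = np.random.randn(150_000)
_values = _fc.compute(_segment)
_named_features = dict(zip(_fc.feature_names, _values))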
from __future__ import division
import os
import time
import argparse
import subprocess
import random
import os.path as osp
from glob import glob
import tensorflow as tf
import numpy as np
import pandas as pd
from tqdm import tqdm
import utils.utils as utils
from utils.data import load_img, save_img, darken, gen_shadow,detect_shadow,batch_crop
from utils.data import concat_img, encode_jpeg,load_four
from utils.raw import load_four_raw,linref2srgb,rgbg2rgb,load_raw_test
from model.network import UNet as UNet
from model.network import UNet_SE as UNet_SE
from loss.losses import compute_percep_loss
seed = 2020#2019
np.random.seed(seed)
tf.set_random_seed(seed)
random.seed(seed)
parser = argparse.ArgumentParser()
parser.add_argument("--loss", default="lp", help="choose the loss type")
parser.add_argument("--is_test", default=0,type=int, help="choose the loss type")
parser.add_argument("--model", default="pre-trained",help="path to folder containing the model")
parser.add_argument("--debug", default=0, type=int, help="DEBUG or not")
parser.add_argument("--use_gpu", default=1, type=int, help="DEBUG or not")
parser.add_argument("--gpu", default=3, type=int, help="DEBUG or not")
parser.add_argument("--save_model_freq", default=10, type=int, help="frequency to save model")
ARGS = parser.parse_args()
DEBUG = ARGS.debug
save_model_freq = ARGS.save_model_freq
model=ARGS.model
is_test = ARGS.is_test
BATCH_SIZE=1
NOFLASH=False
continue_training=True
if ARGS.use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"]=str(np.argmax([int(x.split()[2]) for x in subprocess.Popen("nvidia-smi -q -d Memory | grep -A4 GPU | grep Free", shell=True, stdout=subprocess.PIPE).stdout.readlines()]))
else:
os.environ["CUDA_VISIBLE_DEVICES"]=''
os.environ["OMP_NUM_THREADS"] = '4'
os.environ["CUDA_VISIBLE_DEVICES"] = str(ARGS.gpu)
print(ARGS)
# set up the model and define the graph
lossDict= {}
data_root="data/docshadow"
test_dirs=["bio2"]
test_dfs=[]
for subdir in test_dirs:
df=pd.read_csv(osp.join(data_root,subdir,'trip.csv'))
df["f"]=df["f"].map(lambda x: osp.join('rawc','derived',x))
df["m"]=df["ab"].map(lambda x:osp.join('rawc','derived',x))
df[["gt","ab"]]=df[["gt","ab"]].applymap(lambda x: osp.join('rawc','origin',x))
df=df.applymap(lambda x:osp.join(data_root,subdir,x+'.png'))
test_dfs.append(df)
test_df = pd.concat(test_dfs)
#!/usr/bin/env python3
"""
Author : <NAME>
Date : 2022-02-03
Purpose: Parse tracy JSON files and produce summary .xlsx sheet.
"""
import argparse
from typing import NamedTuple
import json, pathlib, time
import pandas as pd
class Args(NamedTuple):
""" Command-line arguments """
json_file_path: pathlib.Path
output_dir: bool
input_type: bool
csv: bool
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Parse tracy JSONs and produce a summary excel sheet',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('json_file_path',
metavar='json_file_path',
type=pathlib.Path,
help='Path of directory containing tracy JSONs (or directories of gene JSONs)')
parser.add_argument('-o',
'--output_dir',
help="flag whether directory 'output' will be created",
action='store_false')
parser.add_argument('-i',
'--input_type',
help="flag whether Michael put genes into individual folders",
action='store_false')
parser.add_argument('--csv',
help="flag whether .csv files will be produced for each gene (mostly for debugging)",
action='store_true')
args = parser.parse_args()
return Args(args.json_file_path, args.output_dir, args.input_type, args.csv)
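# Hedged CLI sketch (script name and path invented for illustration):
#   python tracy_json_parse.py /path/to/tracy_jsons -i --csv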
# --------------------------------------------------
def main() -> None:
start_main = time.time()
# define args
args = get_args()
json_file_path_arg = args.json_file_path.resolve()
output_dir_arg = args.output_dir
input_type_arg = args.input_type
csv_arg = args.csv
# check output_dir flag, if true: make an output dir
if output_dir_arg:
output_flag = 'tracy_json_parse'
output_path = json_file_path_arg.joinpath(f"{output_flag}_output-{time.strftime('%Y_%m_%d-%H%M%S', time.localtime(time.time()))}")
output_path.mkdir(parents=True, exist_ok=True)
else:
output_path = json_file_path_arg
sample_list = {
'#reference': {
'aliases': ['reference', 'ref'],
'species': '#ref'
},
'AliBarber': {
'aliases': ['alibarber'],
'species': 'Bos taurus'
},
'Cochise': {
'aliases': ['cochise'],
'species': 'Bos taurus'
},
'Sansao': {
'aliases': ['sansao'],
'species': 'Bos taurus'
},
'Slugger': {
'aliases': ['slugger', 'slogger'],
'species': 'Bos taurus'
},
'LLNKune': {
'aliases': ['llnkune', 'llnkure'],
'species': 'Bos indicus'
},
'Nagaki': {
'aliases': ['nagaki'],
'species': 'Bos indicus'
},
'Raider': {
'aliases': ['raider'],
'species': 'Bos indicus'
},
'Renditium': {
'aliases': ['renditium', 'rendition'],
'species': 'Bos indicus'
}
}
list_of_genotype_DataFrames = {}
# check input_type flag, if true:
if input_type_arg:
for gene_dir in json_file_path_arg.iterdir():
if gene_dir.is_dir():
for sample_json in gene_dir.glob('*.json'):
query_path = gene_dir
list_of_genotype_DataFrames.update({gene_dir.stem: generate_genotype_DataFrame(sample_list, gene_dir.stem, query_path, output_path, csv_arg)})
else:
list_of_genes = list(set([gene.stem.split('_')[0] for gene in json_file_path_arg.glob('*.json')]))
for gene in list_of_genes:
query_path = json_file_path_arg
list_of_genotype_DataFrames.update({gene: generate_genotype_DataFrame(sample_list, gene, query_path, output_path, csv_arg)})
write_genotype_DataFrames_to_excel(list_of_genotype_DataFrames, output_path)
print_runtime(f'Produced summary tracy genotyping excel file.')
def generate_genotype_DataFrame(sample_list, gene_ID, query_path, output_path, csv_arg):
"""
Function will generate a genotype DataFrame with genotyping template
"""
SNP_data = generate_template(sample_list)
for sample_json in query_path.glob(f'{gene_ID}*.json'):
gene = sample_json.stem.split('_')[0]
sample = validate_sample_name(sample_json.stem.split('_')[1], sample_list)
results = parse_json(sample_json, gene, sample)
SNP_data[sample]['seq'] = True
SNP_data['#reference']['seq'] = True
for i in results[0]:
SNP_data[sample].update(i)
for i in results[1]:
SNP_data['#reference'].update(i)
SNP_DataFrame = pd.DataFrame.from_dict(SNP_data, orient='index')
for column in SNP_DataFrame.columns[2:]:
reference_genotype = SNP_DataFrame.at['#reference', column][0]
for row_num, row_val in SNP_DataFrame[1:].iterrows():
if not isinstance(SNP_DataFrame.at[row_num, column], list) and SNP_DataFrame.at[row_num, 'seq']:
SNP_DataFrame.at[row_num, column] = [reference_genotype, None, None]
elif not SNP_DataFrame.at[row_num, 'seq']:
SNP_DataFrame.at[row_num, column] = ['*', None, None]
if csv_arg:
SNP_DataFrame.to_csv(output_path.joinpath(f'{gene}.csv'))
print_runtime(f'Produced {gene}.csv')
if SNP_DataFrame.columns.tolist()[2:]:
SNP_DataFrame = SNP_DataFrame.explode(SNP_DataFrame.columns.tolist()[2:])
return SNP_DataFrame
def write_genotype_DataFrames_to_excel(list_of_genotype_DataFrames, output_path) -> None:
"""
Function will write each of the genotypes into a summary excel sheet with each gene in their own sheet
"""
with pd.ExcelWriter(output_path.joinpath('#summary_tracy_results.xlsx'), engine='xlsxwriter') as writer:
workbook = writer.book
# excel formatting styles
# --------------------------------------------------
f_align_left = workbook.add_format({
'align': 'left',
'valign': 'vcenter',})
f_align_center = workbook.add_format({
'align': 'center',
'valign': 'vcenter',})
f_neutral = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bg_color' : '#ffeb9c',
'font_color' : '#9c5700'})
f_neutral_under = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bg_color' : '#ffeb9c',
'font_color' : '#9c5700',
'bottom' : 1 })
f_bad = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bg_color' : '#ffc7ce',
'font_color' : '#9c0006'})
f_bad_under = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bg_color' : '#ffc7ce',
'font_color' : '#9c0006',
'bottom' : 1 })
f_header = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bold' : True,
'bottom' : 6 })
f_ref = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bold' : True,
'bottom' : 1 })
f_bos_divider = workbook.add_format({
'align' : 'center',
'valign' : 'vcenter',
'bottom' : 1 })
f_species_merge = workbook.add_format({
'bold': 1,
'border': 1,
'align': 'center',
'valign': 'vcenter',
'rotation': 90})
# for each each gene in the list of genotype DataFrames, set up an excel sheet
for gene in list_of_genotype_DataFrames:
# deal with blank tables because tracy didn't find any variants
if len(list_of_genotype_DataFrames[gene]) == 9:
list_of_genotype_DataFrames[gene][''] = [None]*9
list_of_genotype_DataFrames[gene].at['#reference', ''] = [None]
for row_i, row_val in list_of_genotype_DataFrames[gene].iterrows():
if row_i != '#reference':
list_of_genotype_DataFrames[gene].at[row_i,''] = [None, None, None]
list_of_genotype_DataFrames[gene] = list_of_genotype_DataFrames[gene].explode('')
list_of_genotype_DataFrames[gene].to_excel(writer, sheet_name=gene, index = True)
genetype_DataFrame_for_excel = list_of_genotype_DataFrames[gene].reset_index()
paternal_expressed = ['GNAS', 'MEST', 'NNAT', 'PEG10', 'PEG3', 'PLAGL1', 'RTL1', 'SNRPN', 'XIST']
maternal_expressed = ['H19', 'IGF2R']
# make formatting changes
# --------------------------------------------------
worksheet = writer.sheets[gene]
# colour sheet depending on whether it's maternal or paternal expressed
if gene.split('-')[0] in paternal_expressed:
worksheet.set_tab_color('#89CFF0')
elif gene.split('-')[0] in maternal_expressed:
worksheet.set_tab_color('#F2ACB9')
# change the column formatting
worksheet.set_column(0, 1, 10, f_align_left)
worksheet.set_column(1, 2, 7, f_align_left)
worksheet.set_column(3, len(list_of_genotype_DataFrames[gene].columns.tolist()) + 1, 14.3, f_align_center)
worksheet.set_column(2, 3, options={'hidden' : True})
# write the header
for col_num, col_value in enumerate(list_of_genotype_DataFrames[gene].columns.values):
worksheet.write(0, col_num + 1, col_value, f_header) # header
worksheet.write(1, col_num + 1, list_of_genotype_DataFrames[gene].iat[0, col_num], f_ref) # subheader
# iterate through the rows and do actions
for row_i, row_val in genetype_DataFrame_for_excel[1::3].iterrows():
# hide the second and third row after the reference
worksheet.set_row(row_i+2, None, options={'hidden' : True})
worksheet.set_row(row_i+3, None, options={'hidden' : True})
# for each column, get the column reference
for col_i, col_v in enumerate(list_of_genotype_DataFrames[gene].columns.values):
if col_i > 1:
reference_genotype = str(genetype_DataFrame_for_excel.at[0, col_v]).strip()
individual_genotype = str(row_val[col_v]).strip()
if row_i == 10:
worksheet.write(row_i + 1, col_i + 1, genetype_DataFrame_for_excel.at[row_i, col_v], f_bos_divider)
if row_i == 22:
worksheet.write(row_i + 1, col_i + 1, genetype_DataFrame_for_excel.at[row_i, col_v], f_bos_divider)
f_neutral_type = f_neutral_under if (row_i == 10 or row_i == 22) else f_neutral
f_bad_type = f_bad_under if (row_i == 10 or row_i == 22) else f_bad
# if the genotype isn't the reference and isn't a failed sequence, highlight this cell as yellow
if individual_genotype != reference_genotype and individual_genotype != '*':
worksheet.write(row_i + 1, col_i + 1, genetype_DataFrame_for_excel.at[row_i, col_v], f_neutral_type)
# if the genotype is a failed sequence, highlight this cell as red
elif individual_genotype == '*':
worksheet.write(row_i + 1, col_i + 1, '*', f_bad_type)
worksheet.merge_range('B3:B12', 'Bos indicus', f_species_merge)
worksheet.merge_range('B15:B24', 'Bos taurus', f_species_merge)
def parse_json(query_ab1, gene, name):
"""
Function will parse json files produced by tracy and return a list of sample genotypes and corresponding reference genotypes
"""
with open(f'{query_ab1}') as tracy_json:
# initialize the json file from tracy
data = json.load(tracy_json)
# make a dataframe of the variants
        variant_data = pd.DataFrame(data['variants']['rows'], columns=data['variants']['columns'])
__author__ = 'qchasserieau'
import itertools
import pandas as pd
def nested_list(volume_array, paths):
return [[volume_array[i]] * len(paths[i]) for i in range(len(volume_array))]
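# Quick sanity check (my own toy example): each volume is repeated once per link on its path.
print(nested_list([10, 20], [[1, 2, 3], [4]]))  # [[10, 10, 10], [20]]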
def assign(volume_array, paths):
nested_row_indices = [[i] * len(paths[i]) for i in range(len(volume_array))]
row_indices = list(itertools.chain.from_iterable(nested_row_indices))
column_indices = list(itertools.chain.from_iterable(paths))
nested_volumes = nested_list(volume_array, paths)
volumes = list(itertools.chain.from_iterable(nested_volumes))
try:
test = volumes[0][0] # volumes_array is an ndarray
sparse = pd.concat(
(
pd.DataFrame(
{
'od': row_indices,
'link': column_indices
}
),
            pd.DataFrame(volumes)
        ),
        axis=1
    )
    return sparse
from datetime import datetime
from django import template
import pandas as pd
import numpy as np
import json
import ast
register = template.Library()
@register.filter(name='getlist')
def getlist(value):
return ast.literal_eval(value)
@register.simple_tag
def multiply(value):
return value * 6
@register.simple_tag
def frame(values):
    df = pd.DataFrame(values)
from functools import partial
from warnings import warn
from typing import Optional, List
import pandas as pd
import numpy as np
def _check_features(df,
features):
valid_features = set(df.index) & set(features)
if len(features) != len(valid_features):
warn(f"Found {len(features) - len(valid_features)} invalid features (e.g. not shown in the dataframe)")
return valid_features
def calc_auc(rank_val: pd.Series,
max_rank: int):
insig_part = rank_val > max_rank
if all(insig_part):
return 0
else:
rank_val[insig_part] = max_rank + 1
rank_sum = sum(rank_val)
n = rank_val.shape[0]
u_val = rank_sum - (n * (n + 1)) / 2 # lower if the rank is higher
auc = 1 - (u_val / (n * max_rank))
return auc
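# Quick sanity check (my own toy example, not from the original pipeline): four features
# ranked 1-4 under max_rank=3 still yield the maximal score of 1.0, since lower ranks
# mean stronger enrichment and the capped rank equals the original rank here.
_toy_ranks = pd.Series([1.0, 2.0, 3.0, 4.0])
print(calc_auc(_toy_ranks.copy(), max_rank=3))  # 1.0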
def calc_U_stat_df(features,
df: pd.DataFrame,
neg_features: Optional[List[str]] = None,
max_rank=1500,
w_neg=1):
if neg_features is None:
neg_features = []
pos_features = list(set(features) - set(neg_features))
if len(pos_features) > 0:
pos = df.reindex(index=pos_features).apply(partial(calc_auc, max_rank=max_rank), axis=0).values
else:
pos = np.zeros(shape=(df.shape[2],))
if len(neg_features) > 0:
neg = df.reindex(index=neg_features).apply(partial(calc_auc, max_rank=max_rank), axis=0).values
else:
neg = np.zeros(shape=(df.shape[2],))
diff = pos - w_neg * neg
# diff[diff < 0] = 0
return diff
def cal_Uscore(X: pd.DataFrame,
pos_genes,
neg_genes,
max_rank=1500,
w_neg=1,
ties_method="average"):
ranked_df = X.rank(ascending=False, method=ties_method)
pos_genes = _check_features(X, pos_genes)
cell_auc = calc_U_stat_df(pos_genes, ranked_df,
neg_features=neg_genes,
max_rank=max_rank,
w_neg=w_neg)
    return pd.DataFrame(cell_auc, index=ranked_df.columns)
from tqdm import tqdm
import pandas as pd
from datetime import timedelta
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def read():
"""
:return: first dataset and GDU dataset
df.columns = ['population', 'site', 'original_planting_date', 'early_planting_date',
'late_planting_date', 'required_gdus', 'scenario_1_harvest_quantity',
'scenario_2_harvest_quanitity']
dfGDU.columns = ['index', 'date', 'site_0', 'site_1']
"""
df = pd.read_csv('Dataset_1.csv')
df['original_planting_date'] = pd.to_datetime(df['original_planting_date'])
df['early_planting_date'] = pd.to_datetime(df['early_planting_date'])
df['late_planting_date'] = pd.to_datetime(df['late_planting_date'])
gdu_df = pd.read_csv('finalGDUs.csv')
gdu_df['date'] = pd.to_datetime(gdu_df['date'])
return df, gdu_df
def day_calculator(planting_date, gdu_df, req_gdu, site):
"""
:param planting_date:
:param gdu_df:
:param req_gdu:
:param site:
:return: the exact date of harvest for the given planting date, based on required GDU
"""
gdu_counter = 0
while gdu_counter < req_gdu:
gdu_of_day = gdu_df[site][gdu_df['date'] == planting_date]
try:
gdu_counter += float(gdu_of_day)
except:
pass
planting_date += timedelta(days=1)
return planting_date - timedelta(days=1)
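# Hedged usage sketch (the toy GDU table is invented for illustration): with a constant
# 10 GDUs per day, a requirement of 25 GDUs is met on the third day after planting.
_toy_gdu = pd.DataFrame({
    "date": pd.date_range("2020-04-01", periods=5),
    "site_0": [10.0] * 5,
})
print(day_calculator(pd.to_datetime("2020-04-01"), _toy_gdu, 25, "site_0"))  # 2020-04-03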
def week_calculator(day):
"""
:param day: a pandas datetime type of a date
:return: the number of week past from starting_day
"""
starting_day = pd.to_datetime('2020-01-05')
temp = day - starting_day
week = np.ceil(temp.days/7)
if temp.days%7==0:
return week +2
return week+1
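# Quick sanity check (my own example): one day after the 2020-01-05 anchor falls in week 2,
# while exactly one week later lands in week 3 because of the modulo branch above.
print(week_calculator(pd.to_datetime("2020-01-06")))  # 2.0
print(week_calculator(pd.to_datetime("2020-01-12")))  # 3.0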
def harvest_changer_init(pop, week_candidates, df, weekly_harvest, scenario):
# get the only row that is belong to the chosen population
tempdf = df[df['population'] == pop]
minimum = np.inf
place_holder = []
    for weeks in week_candidates:  # weeks : (week number, planting date, harvest date)
        if weekly_harvest[weeks[0]] < minimum:
            minimum = weekly_harvest[weeks[0]]
            place_holder = weeks  # keep the week that has the least harvest quantity on it
idx = tempdf.index
if minimum == np.inf:
raise Exception('the population {} is not in this site'.format(pop))
if scenario == 1:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_1_harvest_quantity'])
else:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_2_harvest_quanitity'])
df['original_planting_date'].loc[idx] = place_holder[1]
df['harvest_time'].loc[idx] = place_holder[2]
return df, weekly_harvest
def loss(weekly_harvest, location_capacity):
weekly_harvest = list(weekly_harvest.values())
weekly_harvest = np.array(list(filter(lambda a: a != 0, weekly_harvest)))
location_capacity = np.ones(len(weekly_harvest)) * location_capacity
su = np.abs(location_capacity - weekly_harvest)
return np.sum(su)
def loss2(weekly_harvest):
weekly_harvest = list(weekly_harvest.values())
weekly_harvest = np.array(list(filter(lambda a: a != 0, weekly_harvest)))
mean = np.mean(weekly_harvest)
print('number of weeks: {}\nthe mean: {}\nthe max: {}\nthe median: {}\n######'.format(
len(weekly_harvest),mean,np.max(weekly_harvest),np.median(weekly_harvest)))
mean = np.ones(len(weekly_harvest)) * mean
objective = np.abs(mean - weekly_harvest)
return np.sum(objective)
def refiner(df, weekly_harvest, objective, populations, lop, location_capacity, scenario):
# we want to move from right to left and left to right simultaneously
left2right = np.array(list(weekly_harvest.keys()))
right2left = left2right[-1::-1]
# the weeks that could be delete and not hurting the objective function saves hare
forbidden_weeks = []
weekly_harvest = {i: 0 for i in weekly_harvest.keys()}
for left, right in tqdm(zip(left2right,right2left)):
ok = True
temp_weekly_harvest = weekly_harvest.copy()
temp_df = df.copy()
for pop in lop:
week_candidates = populations[pop]
temp_df, temp_weekly_harvest, ok = harvest_changer_refine(
pop, week_candidates, temp_df, temp_weekly_harvest.copy(), forbidden_weeks.copy(), left, scenario)
if not ok:
break
if ok:
if loss(temp_weekly_harvest, location_capacity) < objective:
forbidden_weeks.append(left)
objective = loss(temp_weekly_harvest, location_capacity)
df = temp_df.copy()
plotter(df, objective)
else:
print('objective before adding {} : {}\nbut after: {}'.format(
left, objective, loss(temp_weekly_harvest, location_capacity)
))
else:
print("couldn't add", left)
temp_weekly_harvest = weekly_harvest.copy()
ok = True
temp_df = df.copy()
for pop in lop:
week_candidates = populations[pop]
temp_df, temp_weekly_harvest, ok = harvest_changer_refine(
pop, week_candidates, temp_df, temp_weekly_harvest.copy(), forbidden_weeks.copy(), right, scenario)
if not ok:
break
if ok:
if loss(temp_weekly_harvest, location_capacity) < objective:
objective = loss(temp_weekly_harvest, location_capacity)
forbidden_weeks.append(right)
df = temp_df.copy()
plotter(df, objective)
else:
print('objective before adding {} : {}\nbut after: {}'.format(
right, objective, loss(temp_weekly_harvest, location_capacity)
))
else:
print("couldn't add", right)
print("the forbidden weeks are : ", forbidden_weeks)
return df, forbidden_weeks, objective
def harvest_changer_refine(pop, week_candidates, df, weekly_harvest, forbidden_weeks, test, scenario):
if test in forbidden_weeks:
return False, False, False
tempdf = df[df['population'] == pop]
forbidden_weeks += [test]
minimum = np.inf
place_holder = []
for weeks in week_candidates:
if weeks[0] in forbidden_weeks:
continue
if weekly_harvest[weeks[0]] < minimum:
minimum = weekly_harvest[weeks[0]]
place_holder = weeks
if minimum == np.inf:
return False, False, False
idx = tempdf.index
if scenario == 1:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_1_harvest_quantity'])
else:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_2_harvest_quanitity'])
df['original_planting_date'].loc[idx] = place_holder[1]
df['harvest_time'].loc[idx] = place_holder[2]
return df, weekly_harvest, True
def tunner(df, weekly_harvest, objective, populations, lop, location_capacity, forbidden_weeks, scenario):
results = {}
# see which weeks could be replaced to get a better objective
other_weeks = [i for i in weekly_harvest.keys() if i not in forbidden_weeks]
weekly_harvest = {i: 0 for i in weekly_harvest.keys()}
for week in tqdm(other_weeks):
for i in range(len(forbidden_weeks)):
temp_weekly_harvest = weekly_harvest.copy()
temp_df = df.copy()
ok = True
for pop in lop:
week_candidates = populations[pop]
temp_df, temp_weekly_harvest, ok = harvest_changer_tuning(
pop, week_candidates, temp_df, temp_weekly_harvest.copy(), forbidden_weeks.copy(), week, i, scenario)
if not ok:
break
if ok:
if loss(temp_weekly_harvest, location_capacity) < objective:
objective = loss(temp_weekly_harvest, location_capacity)
print('instead of week {} we put week {} and got objective {}'.format(
forbidden_weeks[i], week, objective
))
temp_forbidden_weeks = [k for j, k in enumerate(forbidden_weeks) if j != i]
results[objective] = temp_forbidden_weeks + [week]
return results
def harvest_changer_tuning(pop, week_candidates, df, weekly_harvest, forbidden_weeks, test, i, scenario):
tempdf = df[df['population'] == pop]
del forbidden_weeks[i]
forbidden_weeks += [test]
minimum = np.inf
place_holder = []
for weeks in week_candidates:
if weeks[0] in forbidden_weeks:
continue
if weekly_harvest[weeks[0]] < minimum:
minimum = weekly_harvest[weeks[0]]
place_holder = weeks
if minimum == np.inf:
return False, False, False
idx = tempdf.index
if scenario == 1:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_1_harvest_quantity'])
else:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_2_harvest_quanitity'])
df['original_planting_date'].loc[idx] = place_holder[1]
df['harvest_time'].loc[idx] = place_holder[2]
return df, weekly_harvest, True
def final(df, weekly_harvest, populations, lop, location_capacity, forbidden_weeks, scenario):
for pop in tqdm(lop):
week_candidates = populations[pop]
df, weekly_harvest, ok = harvest_changer_final(
pop, week_candidates, df, weekly_harvest, forbidden_weeks, scenario)
if not ok:
break
if ok:
objective = loss(weekly_harvest, location_capacity)
return df, objective
def harvest_changer_final(pop, week_candidates, df, weekly_harvest, forbidden_weeks, scenario):
tempdf = df[df['population'] == pop]
minimum = np.inf
place_holder = []
for weeks in week_candidates:
if weeks[0] in forbidden_weeks:
continue
if weekly_harvest[weeks[0]] < minimum:
minimum = weekly_harvest[weeks[0]]
place_holder = weeks
if minimum == np.inf:
return False, False, False
idx = tempdf.index
if scenario == 1:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_1_harvest_quantity'])
else:
weekly_harvest[place_holder[0]] += int(tempdf['scenario_2_harvest_quanitity'])
df['original_planting_date'].loc[idx] = place_holder[1]
df['harvest_time'].loc[idx] = place_holder[2]
return df, weekly_harvest, True
def plotter(site0, objective):
site0_sc1_weekly = {}
# the start time to count the weeks
date = | pd.to_datetime("2020-01-05") | pandas.to_datetime |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests trained models on transfer environments to generate videos and scores.
Note that this code assumes it will be provided with a .csv file indicating
which checkpoints it should load based on finding the best hyperparameters
for a given metric, such as 'SolvedPathLength_last20%'. It assumes this csv will
have columns labeled 'metric', 'exp_id', 'best_seeds', and 'settings'. Such a
csv can be created using the function utils.save_best_work_units_csv()
"""
import ast
import datetime
import os
import pdb
import pickle
import sys
from absl import app
from absl import flags
from absl import logging
import numpy as np
import pandas as pd
import tensorflow as tf # tf
from tf_agents.environments import tf_py_environment
#from tf_agents.google.utils import mp4_video_recorder
from tf_agents.trajectories import time_step as ts_lib
from tf_agents.trajectories import trajectory
# Import needed to trigger env registration, so pylint: disable=unused-import
from social_rl import gym_multigrid
from social_rl.adversarial_env import adversarial_env
from social_rl.adversarial_env import utils
from social_rl.multiagent_tfagents import multiagent_gym_suite
flags.DEFINE_string(
'root_dir', None,
'Directory where videos and transfer results will be saved')
flags.mark_flag_as_required('root_dir')
#flags.DEFINE_string(
# 'hparam_csv', None,
# 'Required to determine which checkpoints to load.')
#flags.mark_flag_as_required('hparam_csv')
flags.DEFINE_string(
'transfer_csv', None,
'If provided, will load this csv and continue saving to it')
flags.DEFINE_boolean(
'test_on_test', False,
'If True, will also test on the test environments')
flags.DEFINE_boolean(
'test_mini', False,
'If True, will test on the mini environments instead')
flags.DEFINE_boolean(
'fill_in_missing', False,
'If True, will test load all existing transfer dfs and try to fill in the '
'missing data')
flags.DEFINE_boolean(
'reverse_order', False,
'If True, will iterate through experiments in reverse order.')
flags.DEFINE_string(
'metric', 'SolvedPathLength_best_ever',
'Metric to use for selecting which seeds to test.')
flags.DEFINE_boolean(
'debug', False,
'If True, will only load 1 seed for each experiment for speed')
flags.DEFINE_integer(
'num_trials', 10,
'Number of trials to do for each seed in each transfer environment')
flags.DEFINE_boolean(
'save_video_matrices', False,
'If True, will also save matrix encodings of the environment state used to'
'make rendered videos')
flags.DEFINE_string(
'name', 'Test transfer',
'Informative name to output to explain what process is running.')
FLAGS = flags.FLAGS
VAL_ENVS = [
'MultiGrid-TwoRooms-Minigrid-v0',
'MultiGrid-Cluttered40-Minigrid-v0',
'MultiGrid-Cluttered10-Minigrid-v0',
'MultiGrid-SixteenRooms-v0',
'MultiGrid-Maze2-v0',
'MultiGrid-Maze3-v0',
'MultiGrid-Labyrinth2-v0',
]
TEST_ENVS = [
'MultiGrid-FourRooms-Minigrid-v0',
'MultiGrid-Cluttered50-Minigrid-v0',
'MultiGrid-Cluttered5-Minigrid-v0',
'MultiGrid-Empty-Random-15x15-Minigrid-v0',
'MultiGrid-SixteenRoomsFewerDoors-v0',
'MultiGrid-Maze-v0',
'MultiGrid-Labyrinth-v0',
]
MINI_VAL_ENVS = [
'MultiGrid-MiniTwoRooms-Minigrid-v0',
'MultiGrid-Empty-Random-6x6-Minigrid-v0',
'MultiGrid-MiniCluttered6-Minigrid-v0',
'MultiGrid-MiniCluttered-Lava-Minigrid-v0',
'MultiGrid-MiniMaze-v0'
]
MINI_TEST_ENVS = [
'MultiGrid-MiniFourRooms-Minigrid-v0',
'MultiGrid-MiniCluttered7-Minigrid-v0',
'MultiGrid-MiniCluttered1-Minigrid-v0'
]
class Experiment:
"""Loads saved checkpoints, tests on transfer envs, generates videos."""
def __init__(self, name, exp_id, adv_env_name='MultiGrid-Adversarial-v0',
seeds=None, root_dir=None, checkpoint_nums=None, fps=4,
num_blank_frames=4, verbose=True, old_env=False, num_agents=3,
time_last_updated=10, population=True, save_matrices=False,
benchmark_against=None):
self.name = name
self.exp_id = exp_id
self.adv_env_name = adv_env_name
self.seeds = seeds
self.checkpoint_nums = checkpoint_nums
self.fps = fps
self.num_blank_frames = num_blank_frames
self.verbose = verbose
self.old_env = old_env
self.time_last_updated = time_last_updated
self.population = population
self.population_size = 1
self.checkpoint_nums = checkpoint_nums
self.save_matrices = save_matrices
self.benchmark_against = benchmark_against
self.num_agents = num_agents
# Paths
self.root_dir = root_dir
if root_dir is None:
self.root_dir = '/tmp/adversarial_env/'
self.videos_dir = os.path.join(self.root_dir, 'videos')
self.model_path = os.path.join(*[self.root_dir, adv_env_name, 'xm',
str(exp_id)])
self.policies = {}
self.py_env = None
self.tf_env = None
self.blank_frame = None
# Store the results of testing transfer on a number of environments
self.transfer_results = {}
def get_checkpoints_for_seed(self, seed):
path = os.path.join(self.model_path, seed)
path += '/policy_saved_model/agent'
if self.population:
path = os.path.join(path, '0')
return tf.io.gfile.listdir(path)
def get_latest_checkpoint(self, seed):
"""Finds the latest checkpoint number for a model."""
checkpoints = self.get_checkpoints_for_seed(seed)
skip_idx = len('policy_')
if len(checkpoints) < 2:
return None
else:
# Get second last checkpoint to avoid errors where a currently-running XM
# job is in the process of saving some checkpoint that cannot actually
# be loaded yet.
return checkpoints[-2][skip_idx:]
def load(self, claim=True):
"""Loads policies for all agents and initializes training environment."""
# Create directory to claim this experiment as currently being computed
# (Code prioritizes which checkpoint to load next based on when a file in
# this directory was last modified)
if claim:
claim_dir = os.path.join(self.videos_dir, self.name, 'WallsAreLava')
if not tf.io.gfile.exists(claim_dir):
tf.io.gfile.makedirs(claim_dir)
with tf.gfile.GFile(os.path.join(claim_dir, 'claim.txt'), 'wb') as f:
f.write('Claiming this experiment in the name of some process!\n')
print('Claiming this experiment by making file in', claim_dir)
print('Creating experiment', self.name)
print('Loading from', self.model_path)
if self.seeds is None:
files = tf.io.gfile.listdir(self.model_path)
self.seeds = [f for f in files if tf.io.gfile.isdir(
os.path.join(self.model_path, f))]
print('Checking seeds', ', '.join(self.seeds))
# Get latest checkpoint
if self.checkpoint_nums is None:
self.checkpoint_nums = {}
for s in self.seeds:
ckpt_num = self.get_latest_checkpoint(s)
if ckpt_num is None:
print("Can't find checkpoint for seed", s)
else:
self.checkpoint_nums[s] = ckpt_num
print('Loading policies...')
for s in self.seeds:
if s in self.checkpoint_nums.keys():
self.policies[s] = self.load_checkpoints_for_seed(s)
if self.py_env is None or self.tf_env is None:
print('Loading training environment...')
self.py_env, self.tf_env = self.load_environment(self.adv_env_name)
self.tf_env.reset()
self.blank_frame = self.py_env.render()
self.blank_frame_encoding = self.py_env._gym_env.grid.encode() # pylint:disable=protected-access
def load_checkpoint_from_path(self, path, copy_local=True):
"""Load checkpoint from path. Copy locally first to improve speed."""
if copy_local:
# First copy file from server locally to avoid deadline exceeded errors
# and increase performance
local_path = path.replace(self.root_dir, '/tmp/adversarial_env')
tf.io.gfile.makedirs(local_path)
utils.copy_recursively(path, local_path)
else:
local_path = path
return tf.compat.v2.saved_model.load(local_path)
def load_checkpoints_for_seed(self, seed):
"""Loads most recent checkpoint for each agent for a given work unit."""
policy_path = os.path.join(*[self.model_path, seed, 'policy_saved_model'])
checkpoint_path = 'policy_' + self.checkpoint_nums[seed]
policies = {}
agents = ['agent', 'adversary_agent', 'adversary_env']
if 'unconstrained' in self.name or 'minimax' in self.name:
agents = ['agent', 'adversary_env']
policies['adversary_agent'] = [None]
elif 'domain_randomization' in self.name:
agents = ['agent']
for name in agents:
if not self.population:
path = os.path.join(*[policy_path, name, checkpoint_path])
print('\tLoading seed', seed, 'policy for', name, '...')
sys.stdout.flush()
policies[name] = [self.load_checkpoint_from_path(path)]
else:
# Population-based training runs have several agents of each type.
agent_path = os.path.join(policy_path, name)
policies[name] = []
pop_agents = tf.io.gfile.listdir(agent_path)
for pop in pop_agents:
path = os.path.join(*[agent_path, pop, checkpoint_path])
if tf.io.gfile.exists(path):
print('\tLoading seed', seed, 'policy for', name, pop, '...')
policies[name].append(self.load_checkpoint_from_path(path))
return policies
def load_environment(self, env_name):
if 'Adversarial' in env_name:
py_env = adversarial_env.load(env_name)
tf_env = adversarial_env.AdversarialTFPyEnvironment(py_env)
else:
py_env = multiagent_gym_suite.load(env_name)
tf_env = tf_py_environment.TFPyEnvironment(py_env)
return py_env, tf_env
def initialize_video_for_seed(self, seed, env_name, agent_id=None,
adv_id=None, adv_agent_id=None):
"""Creates a video recorder which records agents playing an environment."""
env_start = env_name.find('MultiGrid') + len('MultiGrid-')
env_end = env_name.find('-v0')
trim_env_name = env_name[env_start:env_end]
vid_name = '{0}_xm{1}_{2}_s{3}_{4}'.format(
self.name, self.exp_id, trim_env_name, seed,
self.checkpoint_nums[seed])
if agent_id is not None:
vid_name += '_p{0}'.format(agent_id)
if adv_agent_id is not None:
vid_name += '_a{0}'.format(adv_agent_id)
if adv_id is not None:
vid_name += '_e{0}'.format(adv_id)
matrix_filename = vid_name + '.pkl'
vid_name += '.mp4'
vid_path = os.path.join(
*[self.videos_dir, self.name, trim_env_name, vid_name])
matrix_path = os.path.join(
*[self.videos_dir, self.name, trim_env_name, matrix_filename])
if self.verbose:
print('Saving video to', vid_path)
vid_recorder = mp4_video_recorder.Mp4VideoRecorder(vid_path, self.fps)
return vid_recorder, matrix_path
def run_adversarial_trial(self, adv_policy, agent_policy, recorder,
adv_agent_policy=None):
"""Run a trial in which the environment is generated by an adversary."""
# Run adversary to create environment
encoded_images = self.create_env_with_adversary(adv_policy, recorder)
# Run agent in the environment
reward, encoded_images = self.run_agent(
agent_policy, recorder, self.adv_env_name, self.py_env, self.tf_env,
encoded_images=encoded_images)
if adv_agent_policy is not None:
_, encoded_images = self.run_agent(
adv_agent_policy, recorder, self.adv_env_name, self.py_env,
self.tf_env, encoded_images=encoded_images)
return reward, encoded_images
def run_seed_trials(self, seed, env_name, agent_id=0, num_trials=25,
video_record_episodes=1, adv_id=0, adv_agent_id=None,
py_env=None, tf_env=None):
"""Run a number of trials in an env for agents from a specific seed."""
rewards = []
# Initialize video recorder
recorder = None
if video_record_episodes > 0:
recorder, matrix_filename = self.initialize_video_for_seed(
seed, env_name, agent_id, adv_id, adv_agent_id)
encoded_images = []
for t in range(num_trials):
# Usually record fewer episodes than the number of trials completed
if recorder and t == video_record_episodes:
recorder.end_video()
recorder = None
if (env_name == self.adv_env_name and
'domain_randomization' not in self.name):
# Run adversarial trial
if adv_agent_id is not None:
adv_agent_pol = self.policies[seed]['adversary_agent'][adv_agent_id]
else:
adv_agent_pol = None
r, encoded = self.run_adversarial_trial(
self.policies[seed]['adversary_env'][adv_id],
self.policies[seed]['agent'][agent_id],
recorder, adv_agent_policy=adv_agent_pol)
rewards.append(r)
if encoded is not None:
encoded_images.extend(encoded)
else:
# Run agent in a transfer environment
r, encoded = self.run_agent(
self.policies[seed]['agent'][agent_id], recorder, env_name,
py_env, tf_env)
rewards.append(r)
if encoded is not None:
encoded_images.extend(encoded)
if recorder:
recorder.end_video()
if self.save_matrices:
with tf.gfile.GFile(matrix_filename, 'wb') as f:
pickle.dump(encoded_images, f)
f.close()
return rewards
def check_how_many_trials_in_df(self, df, env, seed, metric, dr_equiv,
agent_id):
"""Check df to see if these trials have already been run."""
if df.empty or 'exp_id' not in df.columns.values:
return False, df
exp_df = df[df['exp_id'] == self.exp_id]
if exp_df.empty:
print('Experiment', self.name, self.exp_id,
'is not yet in the dataframe.')
return False, df
seed_df = exp_df[exp_df['seed'] == int(seed)]
if seed_df.empty:
return False, df
env_df = seed_df[seed_df['env'] == env]
if env_df.empty:
return False, df
ckpt_df = env_df[env_df['domain_rand_comparable_checkpoint'] == dr_equiv]
if ckpt_df.empty:
return False, df
ckpt_df = ckpt_df[ckpt_df['checkpoint'] == int(self.checkpoint_nums[seed])]
if ckpt_df.empty:
return False, df
agent_df = ckpt_df[ckpt_df['agent_id'] == agent_id]
if agent_df.empty:
return False, df
# Check if these results exist for a different metric, and if so duplicate
if metric is not None and metric not in agent_df['metric'].unique():
row_dict = agent_df[0:1].to_dict(orient='records')[0]
print('Found existing records for a different metric. Making a copy for '
'this metric')
print(row_dict)
row_dict['metric'] = metric
df = df.append(row_dict, ignore_index=True)
print('Found trials already in the df for', self.name, env,
'seed', seed, 'dr ckpt', dr_equiv, 'agent_id', agent_id,
'... Skipping ahead.')
return True, df
def find_dr_ckpt_equivalent(self, dr_ckpts, seed):
if dr_ckpts is None:
print('!! No DR checkpoints passed to find_dr_ckpt_equivalent, not '
'possible to find equivalent checkpoint')
return ''
equiv_ckpt = int(self.checkpoint_nums[seed]) / self.num_agents
diffs = np.abs(np.array(dr_ckpts) - equiv_ckpt)
return dr_ckpts[np.argmin(diffs)]
def run_trials_in_env(self, env_name, df=None, num_trials=100,
video_record_episodes=3, test_type='val', metric=None,
dr_ckpts=None):
"""Run all trials of all seeds for a particular environment."""
if env_name == self.adv_env_name and 'domain_randomization' in self.name:
print(
'Skipping adversarial episodes for domain randomization environment')
return None
else:
py_env, tf_env = self.load_environment(env_name)
if df is None:
df = pd.DataFrame()
for s in self.seeds:
dr_equiv = self.find_dr_ckpt_equivalent(dr_ckpts, s)
if s not in self.policies.keys():
continue
for agent_id in range(len(self.policies[s]['agent'])):
already_done, df = self.check_how_many_trials_in_df(
df, env_name, s, metric, dr_equiv, agent_id)
if already_done:
continue
if env_name == self.adv_env_name and 'domain_randomization' not in self.name:
for adv_id in range(len(self.policies[s]['adversary_env'])):
for adv_agent_id in range(len(self.policies[s]['adversary_agent'])):
rewards = self.run_seed_trials(
s, env_name, agent_id=agent_id, num_trials=num_trials,
video_record_episodes=video_record_episodes, adv_id=adv_id,
adv_agent_id=adv_agent_id)
row_dict = self.log_seed(
rewards, s, env_name, agent_id, adv_id, adv_agent_id,
test_type=test_type, metric=metric, dr_equiv=dr_equiv)
df = df.append(row_dict, ignore_index=True)
else:
rewards = self.run_seed_trials(
s, env_name, agent_id=agent_id, num_trials=num_trials,
video_record_episodes=video_record_episodes, py_env=py_env,
tf_env=tf_env)
row_dict = self.log_seed(rewards, s, env_name, agent_id,
test_type=test_type, metric=metric,
dr_equiv=dr_equiv)
df = df.append(row_dict, ignore_index=True)
return df
def log_seed(self, rewards, seed, env_name, agent_id=0, adv_id=None,
adv_agent_id=None, test_type='val', metric=None, dr_equiv=None):
"""Create a dictionary of all score metrics for a particular seed."""
print('Average return for', self.name, env_name, 'seed', seed, 'agent',
agent_id, '=', np.mean(rewards))
if adv_id:
print('\twith adversary', adv_id, 'and antagonist', adv_agent_id)
seed_dict = {
'seed': seed,
'checkpoint': self.checkpoint_nums[seed],
'domain_rand_comparable_checkpoint': dr_equiv,
'num_solved': np.sum(np.greater(rewards, 0)),
'sum': np.sum(rewards),
'mean': np.mean(rewards),
'std': np.std(rewards),
'min': np.min(rewards),
'max': np.max(rewards),
'n': len(rewards),
'agent_id': agent_id,
'env': env_name,
'name': self.name,
'exp_id': self.exp_id,
'run': test_type}
if metric is not None:
seed_dict['metric'] = metric
if adv_id is not None:
seed_dict['adv_id'] = adv_id
if adv_agent_id is not None:
seed_dict['adv_agent_id'] = adv_agent_id
return seed_dict
def create_env_with_adversary(self, policy, recorder):
"""Run adversary to create environment."""
encoded_images = None
# Add blank frames to make it easier to distinguish between runs/agents
if recorder:
for _ in range(self.num_blank_frames):
recorder.add_frame(self.blank_frame)
if self.save_matrices:
encoded_images = [self.blank_frame_encoding] * self.num_blank_frames
policy_state = policy.get_initial_state(1)
time_step = self.tf_env.reset()
if self.old_env:
time_step = backwards_compatible_timestep(time_step)
if self.verbose: print('\tAdversary is building the environment...')
for _ in range(self.py_env._gym_env.adversary_max_steps): # pylint: disable=protected-access
policy_step = policy.action(time_step, policy_state=policy_state)
policy_state = policy_step.state
time_step = self.tf_env.step_adversary(policy_step.action)
if self.old_env:
time_step = backwards_compatible_timestep(time_step)
if recorder:
recorder.add_frame(self.py_env.render())
if self.save_matrices:
encoded_images.append(self.py_env._gym_env.grid.encode()) # pylint:disable=protected-access
return encoded_images
def run_agent(self, policy, recorder, env_name, py_env, tf_env,
encoded_images=None):
"""Run an agent's policy in a particular environment. Possibly record."""
if self.save_matrices and encoded_images is None:
encoded_images = []
# Add blank frames to make it easier to distinguish between runs/agents
for _ in range(self.num_blank_frames):
if recorder:
recorder.add_frame(self.blank_frame)
if self.save_matrices:
encoded_images.append(self.blank_frame_encoding)
rewards = 0
policy_state = policy.get_initial_state(1)
if 'domain_randomization' in self.name and env_name == self.adv_env_name:
time_step = tf_env.reset_random()
elif 'Adversarial' in env_name:
time_step = tf_env.reset_agent()
else:
time_step = tf_env.reset()
if recorder:
recorder.add_frame(py_env.render())
if self.save_matrices:
encoded_images.append(self.py_env._gym_env.grid.encode()) # pylint:disable=protected-access
num_steps = tf.constant(0.0)
while True:
policy_step = policy.action(time_step, policy_state=policy_state)
policy_state = policy_step.state
next_time_step = tf_env.step(policy_step.action)
traj = trajectory.from_transition(time_step, policy_step, next_time_step)
time_step = next_time_step
num_steps += tf.math.reduce_sum(tf.cast(~traj.is_boundary(), tf.float32))
rewards += time_step.reward
if recorder:
recorder.add_frame(py_env.render())
if self.save_matrices:
encoded_images.append(self.py_env._gym_env.grid.encode()) # pylint:disable=protected-access
if traj.is_last():
break
return rewards.numpy().sum(), encoded_images
def backwards_compatible_timestep(new_ts):
"""Remove new observations added in later versions of the environment."""
old_obs = {
'image': new_ts.observation['image'],
'time_step': new_ts.observation['time_step']
}
return ts_lib.TimeStep(
new_ts.step_type,
new_ts.reward,
new_ts.discount,
old_obs)
def prioritize_experiments(experiments, videos_dir):
"""Prioritizes experiments based on recency of generated transfer videos."""
for exp in experiments:
exp_dir = os.path.join(*[videos_dir, exp.name, 'WallsAreLava'])
if tf.io.gfile.exists(exp_dir):
files = tf.io.gfile.listdir(exp_dir)
if files:
# Gets the update time of most recently updated file
update_times = [tf.io.gfile.stat(
os.path.join(exp_dir, f)).mtime for f in files]
update_times.sort()
exp.time_last_updated = update_times[-1]
experiments.sort(key=lambda x: x.time_last_updated)
return experiments
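# Sketch of the same recency-based prioritization using the plain local
# filesystem instead of tf.io.gfile (assumption: videos_dir is a local path).
# Experiments whose newest transfer video is oldest get tested first, so stale
# experiments catch up; this mirrors prioritize_experiments above.
def _prioritize_experiments_local(experiments, videos_dir):
  for exp in experiments:
    exp_dir = os.path.join(videos_dir, exp.name, 'WallsAreLava')
    if os.path.isdir(exp_dir):
      files = [os.path.join(exp_dir, f) for f in os.listdir(exp_dir)]
      if files:
        exp.time_last_updated = max(os.path.getmtime(f) for f in files)
  experiments.sort(key=lambda x: x.time_last_updated)
  return experiments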
def test_experiment_in_environments(exp, envs, df, transfer_df_path,
unsuccessful_trials, test_type='val',
num_trials=25, debug=True, metric=None,
dr_ckpts=None):
"""Test checkpoints for an experiment in a collection of environments."""
for env in envs:
try:
if debug:
print('Testing', test_type, 'env', env)
df = exp.transfer_results[env] = exp.run_trials_in_env(
env, df=df, num_trials=num_trials, video_record_episodes=1,
test_type=test_type, metric=metric, dr_ckpts=dr_ckpts)
save_df_and_log(df, transfer_df_path)
except Exception as e: # pylint: disable=broad-except
logging.error('ERROR! with experiment %s in environment %s',
exp.name, env)
print(e)
print('\n')
unsuccessful_trials.append(exp.name + '_' + env)
return df
def load_existing_transfer_file(transfer_dir, transfer_csv, test_on_test=False,
mini_str='', name=''):
"""Load existing transfer file if it exists. Otherwise initialize new."""
# If filename of transfer csv was provided but does not exist, ignore.
if transfer_csv and not tf.io.gfile.exists(
os.path.join(transfer_dir, transfer_csv)):
print('Error! Could not find transfer CSV file', transfer_csv)
transfer_csv = None
if transfer_csv:
transfer_df_path = os.path.join(transfer_dir, transfer_csv)
    with tf.io.gfile.GFile(transfer_df_path, 'rb') as f:
      df = pd.read_csv(f)
    print('Loaded existing transfer results file:', transfer_df_path)
else:
# Name file containing transfer results based on current time.
test_str = ''
if test_on_test:
test_str = 'test_'
csv_name = 'transfer_' + test_str + mini_str + name + '_results_' + \
datetime.datetime.now().strftime('%d.%m.%Y.%H:%M:%S') + '.csv'
transfer_df_path = os.path.join(transfer_dir, csv_name)
    df = pd.DataFrame()
import unittest
from unittest.mock import MagicMock
import pandas as pd
from pandas.testing import assert_frame_equal
from data_export.models import DATA
from data_export.pipeline.formatters import (
DictFormatter,
FastTextCategoryFormatter,
JoinedCategoryFormatter,
ListedCategoryFormatter,
RenameFormatter,
TupledSpanFormatter,
)
TARGET_COLUMN = "labels"
class TestDictFormatter(unittest.TestCase):
def setUp(self):
self.return_value = {"label": "Label"}
label = MagicMock()
label.to_dict.return_value = self.return_value
self.dataset = pd.DataFrame([{TARGET_COLUMN: [label]}])
def test_format(self):
formatter = DictFormatter(TARGET_COLUMN)
dataset = formatter.format(self.dataset)
expected_dataset = pd.DataFrame([{TARGET_COLUMN: [self.return_value]}])
assert_frame_equal(dataset, expected_dataset)
class TestJoinedCategoryFormatter(unittest.TestCase):
def setUp(self):
self.return_value = "Label"
label = MagicMock()
label.to_string.return_value = self.return_value
self.dataset = pd.DataFrame([{TARGET_COLUMN: [label]}])
def test_format(self):
formatter = JoinedCategoryFormatter(TARGET_COLUMN)
dataset = formatter.format(self.dataset)
expected_dataset = pd.DataFrame([{TARGET_COLUMN: self.return_value}])
assert_frame_equal(dataset, expected_dataset)
class TestListedCategoryFormatter(unittest.TestCase):
def setUp(self):
self.return_value = "Label"
label = MagicMock()
label.to_string.return_value = self.return_value
self.dataset = pd.DataFrame([{TARGET_COLUMN: [label]}])
def test_format(self):
formatter = ListedCategoryFormatter(TARGET_COLUMN)
dataset = formatter.format(self.dataset)
expected_dataset = pd.DataFrame([{TARGET_COLUMN: [self.return_value]}])
assert_frame_equal(dataset, expected_dataset)
class TestTupledSpanFormatter(unittest.TestCase):
def setUp(self):
self.return_value = (0, 1, "Label")
label = MagicMock()
label.to_tuple.return_value = self.return_value
        self.dataset = pd.DataFrame([{TARGET_COLUMN: [label]}])
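    def test_format(self):
        # Sketch mirroring the formatter tests above; that TupledSpanFormatter
        # wraps the span tuples in a list is an assumption based on the other
        # formatters, not taken from the original (truncated) file.
        formatter = TupledSpanFormatter(TARGET_COLUMN)
        dataset = formatter.format(self.dataset)
        expected_dataset = pd.DataFrame([{TARGET_COLUMN: [self.return_value]}])
        assert_frame_equal(dataset, expected_dataset)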
#%%
import os
import sys
import psutil
import pandas as pd
import numpy as np
from tqdm import tqdm
import time
from sklearn.metrics import roc_auc_score
import torch
# HOME = "/home/scao/Documents/kaggle-riiid-test/"
# DATA_DIR = '/home/scao/Documents/kaggle-riiid-test/data/'
# MODEL_DIR = f'/home/scao/Documents/kaggle-riiid-test/model/'
HOME = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
sys.path.append(HOME)
MODEL_DIR = HOME+'/model/'
DATA_DIR = HOME+'/data/'
sys.path.append(HOME)
from sakt import *
from utils import *
from iter_env import *
NUM_SKILLS = 13523
PRIVATE = False
DEBUG = False
VAL_BATCH_SIZE = 4096
SIMU_PRI_SIZE = 250_000
SIMU_PUB_SIZE = 25_000
if DEBUG:
test_df = pd.read_parquet(DATA_DIR+'cv2_valid.parquet')
test_df[:SIMU_PUB_SIZE].to_parquet(DATA_DIR+'test_pub_simu.parquet')
#%%
# if __name__ == "__main__":
TRAIN_DTYPES = {
TIMESTAMP: 'int64',
USER_ID: 'int32',
CONTENT_ID: 'int16',
CONTENT_TYPE_ID:'int8',
TASK_CONTAINER_ID: 'int16',
TARGET: 'int8',
PRIOR_QUESTION_TIME: 'float32',
PRIOR_QUESTION_EXPLAIN: 'bool'
}
print("\nLoading train for inference...")
train_df = pd.read_parquet(DATA_DIR+'cv5_train.parquet',
columns=list(TRAIN_DTYPES.keys()))
train_df = train_df.astype(TRAIN_DTYPES)
print("Loaded train.")
print("\nLoading private simulated test set...")
if PRIVATE:
test_df = pd.read_parquet(DATA_DIR+'cv5_valid.parquet')
test_df = test_df[:SIMU_PRI_SIZE]
else:
    test_df = pd.read_parquet(DATA_DIR+'test_pub_simu.parquet')
import streamlit as st
from pathlib import Path
import requests
import pandas as pd
from pandas import json_normalize
import base64
from crim_intervals import *
import ast
from itertools import tee, combinations
from typing import List
# import matplotlib
#sets up function to call Markdown File for "about"
def read_markdown_file(markdown_file):
return Path(markdown_file).read_text()
# download function for output csv's
def download_link(object_to_download, download_filename, download_link_text):
"""
Generates a link to download the given object_to_download.
object_to_download (str, pd.DataFrame): The object to be downloaded.
download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt
download_link_text (str): Text to display for download link.
Examples:
download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')
download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')
"""
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
# functions for pairs of ratios
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def get_ratios(input_list):
ratio_pairs = []
for a, b in pairwise(input_list):
ratio_pairs.append(b / a)
return ratio_pairs
# Here we group the rows in the DF by the Pattern Generating Match
# Each has its own string of durations, and duration ratios
# and then we compare the ratios to get the differences
# the "list(combinations)" method takes care of building the pairs, using data from our dataframe 'results'
def compare_ratios(ratios_1, ratios_2):
## division of lists
# using zip() + list comprehension
diffs = [i - j for i, j in zip(ratios_1, ratios_2)]
abs_diffs = [abs(ele) for ele in diffs]
sum_diffs = sum(abs_diffs)
return sum_diffs
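# Worked example (illustrative only, never called by the app): for durations
# [2, 1, 1, 2] get_ratios gives [0.5, 1.0, 2.0]; against the ratios of
# [2, 2, 1, 2] -> [1.0, 0.5, 2.0] the summed absolute difference is 1.0.
def _ratio_comparison_example():
    ratios_a = get_ratios([2, 1, 1, 2])   # [0.5, 1.0, 2.0]
    ratios_b = get_ratios([2, 2, 1, 2])   # [1.0, 0.5, 2.0]
    return compare_ratios(ratios_a, ratios_b)  # 0.5 + 0.5 + 0.0 = 1.0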
#results["Pattern_Generating_Match"] = results["Pattern_Generating_Match"].apply(tuple)
def get_ratio_distances(results, pattern_col, output_cols):
matches = []
for name, group in results.groupby(pattern_col):
ratio_pairs = list(combinations(group.index.values, 2))
for a, b in ratio_pairs:
a_match = results.loc[a]
b_match = results.loc[b]
sum_diffs = compare_ratios(a_match.duration_ratios, b_match.duration_ratios)
match_dict = {
"pattern": name,
"sum_diffs": sum_diffs
}
for col in output_cols:
match_dict.update({
f"match_1_{col}": a_match[col],
f"match_2_{col}": b_match[col]
})
matches.append(match_dict)
return pd.DataFrame(matches)
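# Sketch of how the search sections below use get_ratio_distances: build the
# pairwise table of durational-ratio distances for each melodic pattern and
# keep only the pairs whose summed differences stay under a chosen threshold.
# The column names mirror the real search output; the threshold is arbitrary.
def _filter_ratio_distances(results, max_sum_diffs=2):
    distances = get_ratio_distances(
        results, "pattern_generating_match",
        ["piece_title", "part", "start_measure", "end_measure"])
    return distances[distances.sum_diffs <= max_sum_diffs]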
# classifier output to pandas
def classified_matches_to_pandas(matches):
soggetti_matches = []
for i, cm in enumerate(matches):
for j, soggetti in enumerate(cm.matches):
soggetti_matches.append({
"piece": soggetti.first_note.metadata.title,
"type": cm.type,
"part": soggetti.first_note.part.strip("[] "),
"start_measure": soggetti.first_note.note.measureNumber,
"soggetto_number": j + 1,
"pattern": cm.pattern,
"match_number": i + 1
})
return pd.DataFrame(soggetti_matches)
# work lists
WorkList_mei = ['CA_Brumel_01.mei_msg.mei',
'CA_Brumel_02.mei_msg.mei',
'CA_Brumel_03.mei_msg.mei',
'CA_Brumel_04.mei_msg.mei',
'CA_Brumel_05.mei_msg.mei',
'CA_Guerrero_Missa_Congratulamini_1.mei_msg.mei',
'CA_Guerrero_Missa_Congratulamini_2.mei_msg.mei',
'CA_Guerrero_Missa_Congratulamini_3.mei_msg.mei',
'CA_Guerrero_Missa_Congratulamini_4.mei_msg.mei',
'CA_Guerrero_Missa_Congratulamini_5.mei_msg.mei',
'CA_Guerrero_Missa_Congratulamini_6.mei_msg.mei',
'CA_Guerrero_Missa_sancta_et_immaculata_1.mei_msg.mei',
'CA_Guerrero_Missa_sancta_et_immaculata_2.mei_msg.mei',
'CA_Guerrero_Missa_sancta_et_immaculata_3.mei_msg.mei',
'CA_Guerrero_Missa_sancta_et_immaculata_4.mei_msg.mei',
'CA_Guerrero_Missa_sancta_et_immaculata_5.mei_msg.mei',
'CA_Guerrero_Missa_sancta_et_immaculata_6.mei_msg.mei',
'CA_Lasso_Missa_Doulce_1.mei_msg.mei',
'CA_Lasso_Missa_Doulce_2.mei_msg.mei',
'CA_Lasso_Missa_Doulce_3.mei_msg.mei',
'CA_Lasso_Missa_Doulce_5.mei_msg.mei',
'CA_Sandrin_Doulce_msg.mei',
'CA_Recordare.mei_msg.mei'
]
# general headers for main window
#WorkList = ['CRIM_Model_0001.mei']
st.header("CRIM Project: CRIM Intervals")
st.subheader("Python Scripts for Analysis of Similar Soggetti in Citations: The Renaissance Imitation Mass")
st.write("Visit the [CRIM Project](https://crimproject.org) and its [Members Pages] (https://sites.google.com/haverford.edu/crim-project/home)")
st.write("Also see the [Relationship Metadata Viewer] (https://crim-relationship-data-viewer.herokuapp.com/) and [Observation Metadata Viewer] (https://crim-observation-data-viewer.herokuapp.com/)")
st.sidebar.subheader("Step 1: Choose Piece(s) from CRIM pending additions")
st.sidebar.write("Select Works with Menu, or Type ID, such as 'Model_0008'")
selected_works = st.sidebar.multiselect('', WorkList_mei)
print_list = pd.DataFrame(selected_works)
st.write("Your Selections:")
st.write(print_list)
#st.write(selected_works)
#st.write(selected_works)
# correct URL for MEI 4.0
WorkList_mei = [el.replace("CA_", "https://raw.githubusercontent.com/RichardFreedman/CRIM_additional_works/main/") for el in selected_works]
@st.cache(allow_output_mutation=True)
def load_corpusbase(WorkList_mei:List):
corpus = CorpusBase(WorkList_mei)
return corpus
# Now pass the list of MEI files to Crim intervals
#@st.cache(allow_output_mutation=True)
#if st.sidebar.button('Load Selections'):
corpus = load_corpusbase(WorkList_mei)
# Correct the MEI metadata
import xml.etree.ElementTree as ET
import requests
MEINSURI = 'http://www.music-encoding.org/ns/mei'
MEINS = '{%s}' % MEINSURI
for i, path in enumerate(WorkList_mei):
try:
if path[0] == '/':
mei_doc = ET.parse(path)
else:
mei_doc = ET.fromstring(requests.get(path).text)
# Find the title from the MEI file and update the Music21 Score metadata
title = mei_doc.find('mei:meiHead//mei:titleStmt/mei:title', namespaces={"mei": MEINSURI}).text
print(path, title)
corpus.scores[i].metadata.title = title
except:
continue
# Header
# Select Actual or Incremental Durations
st.sidebar.subheader("Step 2: Select Rhythmic Preference")
duration_choice = st.sidebar.radio('Select Actual or Incremental Durations', ["Actual","Incremental@1","Incremental@2", "Incremental@4"])
if duration_choice == 'Actual':
vectors = IntervalBase(corpus.note_list)
elif duration_choice == 'Incremental@1':
vectors = IntervalBase(corpus.note_list_incremental_offset(1))
elif duration_choice == 'Incremental@2':
vectors = IntervalBase(corpus.note_list_incremental_offset(2))
elif duration_choice == 'Incremental@4':
vectors = IntervalBase(corpus.note_list_incremental_offset(4))
else:
st.write("Please select duration preference")
# Select Generic or Semitone
st.sidebar.subheader("Step 3: Select Interval Preference")
scale_choice = st.sidebar.radio('Select Diatonic or Chromatic', ["Diatonic","Chromatic"])
if scale_choice == 'Diatonic':
scale = vectors.generic_intervals
elif scale_choice == 'Chromatic':
scale = vectors.semitone_intervals
else:
st.write("Please select duration preference")
# Select Vector Length and Minimum Number of Matches
st.sidebar.subheader("Step 4: Select Vectors, Matches, and Thresholds")
vector_length = st.sidebar.number_input("Enter a Number for the Length of Vector, or use default", min_value=1, max_value=20, value=5)
minimum_match = st.sidebar.number_input("Enter a Minimum Number of Matches, or use default", min_value=1, max_value=20, value=3)
close_distance = st.sidebar.number_input("If Close Search, then Enter Threshold, or use default", min_value=1, max_value=20, value=2)
max_sum_diffs = st.sidebar.number_input("Enter Maximum Durational Differences (Whole Number or Fractional Value), or use default", min_value=None, max_value=None, value=2)
patterns = into_patterns([scale], vector_length)
search_summary_key = "Key: VE = Number of Melodic Vectors, MM = Minimum Matches, CD = Melodic Close Distance, DD = Maximum Sum of Durational Differences"
close_short_search_summary = "{}_{}_V{}_M{}_C{}".format(duration_choice, scale_choice, vector_length, minimum_match, close_distance)
exact_short_search_summary = "{}_{}_V{}_M{}".format(duration_choice, scale_choice, vector_length, minimum_match)
close_dur_short_search_summary = "{}_{}_V{}_M{}_C{}_D{}".format(duration_choice, scale_choice, vector_length, minimum_match, close_distance, max_sum_diffs)
exact_dur_short_search_summary = "{}_{}_V{}_M{}_D{}".format(duration_choice, scale_choice, vector_length, minimum_match, max_sum_diffs)
# Select Exact or Close
st.sidebar.subheader("Step 5: Search for Similar Melodies")
st.sidebar.write("Adjust Time and Melodic Scales, Vectors, Minimum Matches, and Melodic Flex in Steps 2, 3, 4 at left, or use defaults")
if st.sidebar.button('Run Exact Search'):
patterns = into_patterns([scale], vector_length)
find_matches = find_exact_matches(patterns, minimum_match)
output = export_pandas(find_matches)
#pd.DataFrame(output).head()
results = pd.DataFrame(output)
st.subheader('Key Values for Your Exact Search: ')
st.write(exact_short_search_summary)
st.text("(use this for CSV title or notes)")
st.write('Results of Exact Melodic Pattern Search')
st.write(results)
st.subheader("Optional: Download CSV of Exact Melodies from Step 5")
s1 = st.text_input('Provide filename for melodic pattern match download (must include ".csv")')
tmp_download_link = download_link(results, s1, 'Click here to download your data!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
if st.sidebar.button('Run Close Search'):
patterns = into_patterns([scale], vector_length)
find_matches = find_close_matches(patterns, minimum_match, close_distance)
output = export_pandas(find_matches)
#pd.DataFrame(output).head()
st.subheader('Key Values for Your Close Search: ')
st.write(close_short_search_summary)
st.text("(use this for CSV title or notes)")
results = pd.DataFrame(output)
st.write('Results of Close Melodic Pattern Search')
st.write(results)
st.subheader("Optional: Download CSV of Close Melodies from Step 5")
s2 = st.text_input('Provide filename for melodic pattern match download (must include ".csv")')
tmp_download_link = download_link(results, s2, 'Click here to download your data!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
st.sidebar.subheader("Step 6: Search with Duration Filter")
st.sidebar.write("Threshold of Differences between Durational Ratios, or use default, above")
if st.sidebar.button('Run Exact Search with Duration Filter'):
patterns = into_patterns([scale], vector_length)
find_matches = find_exact_matches(patterns, minimum_match)
output = export_pandas(find_matches)
results = pd.DataFrame(output)
st.subheader('Key Values for Your Exact Search with Durational Distance: ')
st.write(exact_dur_short_search_summary)
st.text("(use this for CSV title or notes)")
st.write('Results of Exact Melodic Pattern Search: Durational Ratios Unfiltered')
# clean-up results of melodic match: chance patterns to tuples
results["pattern_generating_match"] = results["pattern_generating_match"].apply(tuple)
# evaluation Note_Durations as literals--only needed if we're importing CSV
#results['note_durations'] = results['note_durations'].apply(ast.literal_eval)
durations = results['note_durations']
# calculates 'duration ratios' for each soggetto, then adds this to the DF
results["duration_ratios"] = results.note_durations.apply(get_ratios)
st.write(results)
# now we calculate the _distances_ between pairs of ratios
ratio_distances = get_ratio_distances(results, "pattern_generating_match", ["piece_title", "part", "start_measure", "end_measure"])
ratios_filtered = ratio_distances[ratio_distances.sum_diffs <= max_sum_diffs]
st.write("Results with Filtered Distances of Durational Ratios")
st.write(ratios_filtered)
st.subheader("Optional: Download CSV of Exact Melodies and Durations from Step 6")
s3 = st.text_input('Provide filename for durational match download (must include ".csv")')
tmp_download_link = download_link(ratios_filtered, s3, 'Click here to download your data!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
if st.sidebar.button('Run Close Search with Duration Filter'):
patterns = into_patterns([scale], vector_length)
find_matches = find_close_matches(patterns, minimum_match, close_distance)
output = export_pandas(find_matches)
results = pd.DataFrame(output)
st.subheader('Key Values for Your Close Search with Durational Distance: ')
st.write(close_dur_short_search_summary)
st.text("(use this for CSV title or notes)")
# clean-up results of melodic match: chance patterns to tuples
results["pattern_generating_match"] = results["pattern_generating_match"].apply(tuple)
# evaluation Note_Durations as literals--only needed if we're importing CSV
#results['note_durations'] = results['note_durations'].apply(ast.literal_eval)
durations = results['note_durations']
# calculates 'duration ratios' for each soggetto, then adds this to the DF
results["duration_ratios"] = results.note_durations.apply(get_ratios)
st.write("Results of Close Search: Durational Ratios Unfiltered")
st.write(results)
# now we calculate the _distances_ between pairs of ratios
ratio_distances = get_ratio_distances(results, "pattern_generating_match", ["piece_title", "part", "start_measure", "end_measure"])
ratios_filtered = ratio_distances[ratio_distances.sum_diffs <= max_sum_diffs]
sort_by_measure = ratios_filtered.sort_values(["match_1_start_measure"])
st.write("Results with Filtered Distances of Durational Ratios")
st.write(sort_by_measure)
st.subheader("Optional: Download CSV of Close Melodies and Durations from Step 6")
s4 = st.text_input('Provide filename for durational match download (must include ".csv")')
tmp_download_link = download_link(sort_by_measure, s4, 'Click here to download your data!')
st.markdown(tmp_download_link, unsafe_allow_html=True)
# Classify Presentation Types
st.sidebar.subheader("Step 7: Classify Presentation Types")
st.sidebar.write('One Work at a Time!')
st.sidebar.write()
st.sidebar.write("Set Threshold of Durational Differences for Presentation Types.")
st.sidebar.write("NB: This is different from the durational difference threshold set above.")
st.sidebar.write()
max_sum_diffs_classify = st.sidebar.number_input("Enter Maximum Durational Differences Among Soggetti to be Classified (whole number only)", min_value=None, max_value=None, value=1)
if st.sidebar.button('Run Classifier with Exact Search'):
find_matches = find_exact_matches(patterns, minimum_match)
classified_matches = classify_matches(find_matches, max_sum_diffs_classify)
    classified_output = classified_matches_to_pandas(classified_matches)
    classified_results = pd.DataFrame(classified_output)
import gensim
import numpy as np
import pandas as pd
import re
import os
import time
import jieba
import cv2
import json
import urllib
import random
import hashlib
from snownlp import sentiment
from snownlp import SnowNLP
import jieba.posseg as pseg
from gensim.models import word2vec
import logging
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models, transforms
from PIL import Image
from tensorflow.keras.applications import vgg19
from tensorflow.keras.applications import resnet50
from tensorflow.keras.preprocessing import image
from collections import Counter
from scipy.linalg import norm
train_csv_path = r'G:\毕设\数据集\微博\train.csv'
text_csv_path = r'G:\毕设\数据集\微博\text.csv'
user_csv_path = r'G:\毕设\数据集\微博\user.csv'
image_csv_path = r'G:\毕设\数据集\微博\image.csv'
en_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_index.json'
cn_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_cn.json'
image_class_vgg19_score_path = r'G:\毕设\数据集\微博\image_class_vgg19.txt'
image_class_resnet50_score_path = r'G:\毕设\数据集\微博\image_class_resnet50.txt'
train_negative_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/negative.txt'
train_positive_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/positive.txt'
sentiment_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/sentiment.marshal'
stopwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/stopwords.txt"
word2vec_txt_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/word2vec_corpus.txt"
word2vec_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/text8.model"
possentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/possentiwords.txt"
negsentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/negsentiwords.txt"
appid = '20190716000318328'
secretKey = '<KEY>'
url_baidu = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
def train_data_read(train_csv_path):
"""
    Read in the training data.
    df_text  : text columns
    df_user  : user columns
    df_image : image columns
    """
    logging.info("Loading data...")
    # weibo post information
    df_text = pd.read_csv(train_csv_path,usecols=['id','text','category','label'])
    # user information
    df_user = pd.read_csv(train_csv_path,usecols=['id','userGender','userFollowCount','userFansCount','userWeiboCount','userLocation','userDescription'])
    # weibo image information
    df_image = pd.read_csv(train_csv_path,usecols=['id','piclist'])
    logging.info("Data loaded")
return df_text,df_user,df_image
def text_data_read():
'''
    Read the text feature file.
    :return: text feature dataframe
'''
df_text = pd.read_csv(text_csv_path)
return df_text
def text_insert_cols(df_text,new_features_list):
'''
    Add new feature columns to the text dataframe so the values can be filled in later.
    :param df_text: text dataframe
    :return: df_text: text dataframe with the new columns
    '''
    logging.info("Adding new text feature columns...")
    col_name = list(df_text.columns)
    # de-duplicate column names before inserting the new ones
    col_name = col_name + sorted(set(new_features_list) - set(col_name), key=new_features_list.index)
    df_text = df_text.reindex(columns=col_name, fill_value=0)
    logging.info("New text feature columns added")
return df_text
def text_feature_extraction(df_text):
    logging.info("Starting text feature extraction...")
# #统计字符串长度
# df_text['text_length'] = df_text['text'].str.len()
# #将情感分数列转为float
# df_text['sentiment_score'] = df_text['sentiment_score'].astype(float)
for j in range(1,65):
df_text['word2vec_'+str(j)] = df_text['word2vec_'+str(j)].astype(float)
# #其余数据统计
i = 0
for index, row in df_text.iterrows():
        logging.info("Progress "+str(i+1)+"/"+str(df_text.shape[0]))
        # get the text content to process
text_content = row['text']
# #获得是否含有问号以及问号的数量
# if row['num_questmarks'] > 0:
# df_text.at[i, 'contains_questmark'] = 1
# df_text.at[i,'contains_questmark'], df_text.at[i,'num_questmarks'] = text_questmark(text_content)
# #获得是否含有感叹号以及感叹号的数量
# if row['num_exclammarks'] > 0:
# df_text.at[i, 'contains_exclammark'] = 1
# df_text.at[i, 'contains_exclammark'], df_text.at[i, 'num_exclammarks'] = text_exclammark(text_content)
# #获得是否含有hashtag以及hashtag的数量
# if row['num_hashtags'] > 0:
# df_text.at[i, 'contains_hashtag'] = 1
# df_text.at[i, 'contains_hashtag'], df_text.at[i, 'num_hashtags'] = text_hashtag(text_content)
# #获得是否含有url以及url的数量
# if row['num_URLs'] > 0:
# df_text.at[i, 'contains_URL'] = 1
# df_text.at[i, 'contains_URL'], df_text.at[i, 'num_URLs'] = text_url(text_content)
# #获得是否含有@以及@的数量
# if row['num_mentions'] > 0:
# df_text.at[i, 'contains_mention'] = 1
# df_text.at[i, 'contains_mention'], df_text.at[i, 'num_mentions'] = text_mention(text_content)
# #获得文本情感分数
# df_text.at[i, 'sentiment_score'] = text_sentiment_score(text_content)
# #词性标注,统计名词、动词、代词数量并返回
# df_text.at[i, 'num_noun'],df_text.at[i, 'num_verb'],df_text.at[i, 'num_pronoun'] = text_part_of_speech(text_content)
# #计算每条微博正文的词向量均值
df_text.at[i,-64:] = text_compute_word2vec(text_content).tolist()
# #获得每条微博的积极词汇数、消极词汇数
# df_text.at[i, 'num_possentiwords'], df_text.at[i, 'num_negsentiwords'] = text_pos_neg_sentiwords(text_content)
#获取新闻是否含有第一人称、第二人称、第三人称
# df_text.at[i, 'contains_firstorderpron'], df_text.at[i, 'contains_secondorderpron'], df_text.at[i, 'contains_thirdorderpron'] = text_get_fir_sec_thi_orderpron(text_content)
i += 1
    logging.info("Text feature extraction finished")
return df_text
def text_get_fir_sec_thi_orderpron(text_content):
"""
    Check whether first, second or third person pronouns appear in the weibo text.
    :param text_content:
    :return: has_first, has_second, has_third (0: absent, 1: present)
    """
    has_first = 0  # first person
    has_second = 0  # second person
    has_third = 0  # third person
if text_content.find('我') != -1:
has_first = 1
elif text_content.find('你') != -1:
has_second = 1
elif text_content.find('他') != -1 or text_content.find('她') != -1 or text_content.find('它') != -1:
has_third = 1
return has_first, has_second, has_third
def text_pos_neg_sentiwords(text_content):
    # tokenised string with stopwords removed
    new_text_content = jieba_clear_text(text_content)
    # turn the tokens into a list
    list_new_text_content = new_text_content.split(' ')
    # count positive and negative sentiment words
num_pos = 0
num_neg = 0
for word in list_new_text_content:
if word in possentiwords:
num_pos += 1
elif word in negsentiwords:
num_neg += 1
return num_pos,num_neg
def text_part_of_speech(text_content):
"""
    POS-tag the Chinese characters in the text and return the counts.
    :param text_content: text to process
    :return: n (noun count), v (verb count), r (pronoun count)
    """
    # keep only the Chinese characters
    if pd.isna(text_content):
        return 0,0,0
    words = pseg.cut("".join(re.findall(u"[\u4e00-\u9fa5]",text_content)))
    n = 0  # noun count
    r = 0  # pronoun count
    v = 0  # verb count
for w in words:
if (w.flag.startswith('n')):
n += 1
elif (w.flag.startswith('v')):
v += 1
elif (w.flag.startswith('r')):
r += 1
return n,v,r
def text_questmark(text_content):
"""
    Count question marks in the text.
    :param text_content: text to process
    :return: whether it contains a question mark (1: yes, 0: no), number of question marks
"""
en_questmark_nums = text_content.count("?")
cn_questmark_nums = text_content.count("?")
if(en_questmark_nums + cn_questmark_nums > 0):
return 1,en_questmark_nums + cn_questmark_nums
else:
return 0,0
def text_train_sentiment():
    # train on the weibo corpus
    sentiment.train(train_negative_corpus_path,train_positive_corpus_path)
    # save the model, and update snownlp->sentiment->__init__.py->data_path accordingly
sentiment.save(sentiment_model_path)
def text_sentiment_score(text_content):
"""
    Get the sentiment score of the text.
    0 <------------------> 1
    negative        positive
    :param text_content: text to process
    :return: sentiment_score.sentiments, the sentiment score
    """
    if pd.isna(text_content):
        return 0
    # remove stopwords
new_text_content = jieba_clear_text(text_content)
try:
sentiment_score = SnowNLP(new_text_content).sentiments
except:
return 0
return sentiment_score
def jieba_clear_text(text):
"""
    Tokenise with jieba and drop stopwords (from the custom stopword list) as well as single-character tokens.
    """
    text_n= "".join(re.findall(u"[\u4e00-\u9fa5]", text))
    raw_result = "$".join(jieba.cut(text_n))
    myword_list = []
    # remove stopwords
for myword in raw_result.split('$'):
if myword not in stopwords:
myword_list.append(myword)
return " ".join(myword_list)
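# Illustrative sketch of how the two helpers above combine (not called by the
# pipeline).  The sample sentence is arbitrary; jieba_clear_text reads the
# module-level `stopwords` list, so it is loaded here first, and
# text_sentiment_score relies on the SnowNLP sentiment model configured
# elsewhere in this script.
def _sentiment_demo():
    global stopwords
    stopwords = get_stopwords_list()
    sample = "今天天气真好,心情非常愉快"          # arbitrary example sentence
    cleaned = jieba_clear_text(sample)            # space-separated tokens, stopwords removed
    score = text_sentiment_score(sample)          # float in [0, 1], higher = more positive
    return cleaned, score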
def get_stopwords_list():
"""
    Get the stopword list.
    :return: stopwords: list of stopwords
"""
my_stopwords = []
fstop = open(stopwords_path, "r", encoding='UTF-8')
for eachWord in fstop.readlines():
my_stopwords.append(eachWord.strip())
fstop.close()
return my_stopwords
def get_possentiwords_list():
"""
    Get the list of positive sentiment words.
:return:
"""
my_possentiwords = []
fp = open(possentiwords_path, "r", encoding='UTF-8')
for eachWord in fp.readlines():
my_possentiwords.append(eachWord.strip())
fp.close()
return my_possentiwords
def get_negsentiwords_list():
"""
    Get the list of negative sentiment words.
:return:
"""
my_negsentiwords = []
fn = open(negsentiwords_path, "r", encoding='UTF-8')
for eachWord in fn.readlines():
my_negsentiwords.append(eachWord.strip())
fn.close()
return my_negsentiwords
def text_exclammark(text_content):
"""
    Count exclamation marks in the text.
    :param text_content: text to process
    :return: whether it contains an exclamation mark (1: yes, 0: no), number of exclamation marks
"""
en_exclammark_nums = text_content.count("!")
cn_exclammark_nums = text_content.count("!")
if(en_exclammark_nums + cn_exclammark_nums > 0):
return 1,en_exclammark_nums + cn_exclammark_nums
else:
return 0,0
def text_hashtag(text_content):
"""
    Check whether the text contains a hashtag.
    On Weibo a hashtag is enclosed by two '#' characters, e.g. #graduation project#
    :param text_content: text to process
    :return: whether it contains a hashtag (1: yes, 0: no), number of hashtags
"""
hashtag_nums = text_content.count("#")
if(hashtag_nums == 0):
return 0,0
else:
return 1,hashtag_nums/2
def text_url(text_content):
"""
    Check whether the text contains a weibo URL.
    :param text_content: text to process
    :return: whether it contains a URL (1: yes, 0: no), number of URLs
"""
url = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', text_content)
if url:
return 1,len(url)
else:
return 0,0
def text_mention(text_content):
"""
    Count user mentions (@) in the weibo text.
    :param text_content: text to process
    :return: whether it contains a mention (1: yes, 0: no), number of mentions
"""
mention_nums = text_content.count("@")
if(mention_nums > 0):
return 1,mention_nums
else:
return 0,0
def text_train_word2vec_model(word2vec_txt_path,word2vec_model_path):
"""
    Train a word2vec embedding model.
    :param word2vec_txt_path: corpus path
    :param word2vec_model_path: path to save the model
    :return: word2vec model
    """
    sentences = word2vec.Text8Corpus(word2vec_txt_path)
    model = word2vec.Word2Vec(sentences,size=100,workers=4)
    # 1. sentences: may be a list; for large corpora use BrownCorpus, Text8Corpus or LineSentence.
    # 2. sg: training algorithm; 0 (default) = CBOW, 1 = skip-gram.
    # 3. size: dimensionality of the output word vectors, default 100. Larger sizes need more training data; tens to a few hundred is typical.
    # 4. window: context window; 8 means 8 words before and after (the actual window is sampled, <= the given value), default 5.
    # 5. alpha: learning rate.
    # 6. seed: seed for the random number generator used to initialise the word vectors.
    # 7. min_count: vocabulary cut-off; words occurring fewer times are dropped, default 5.
    # 8. max_vocab_size: RAM limit while building the vocabulary; the rarest words are pruned when it is exceeded (roughly 1 GB per 10 million words). None means no limit.
    # 9. sample: down-sampling threshold for frequent words, default 1e-3, useful range (0, 1e-5).
    # 10. workers: number of training threads.
    # 11. hs: 1 = hierarchical softmax, 0 (default) = negative sampling.
    # 12. negative: if > 0, negative sampling is used with this many noise words.
    # 13. cbow_mean: 0 = sum of the context word vectors, 1 (default) = their mean; only used with CBOW.
    # 14. hashfxn: hash function used to initialise the weights, defaults to Python's hash.
    # 15. iter: number of epochs, default 5.
    # 16. trim_rule: vocabulary trimming rule deciding which words to keep or drop; None means min_count is used.
    # 17. sorted_vocab: if 1 (default), sort the vocabulary by descending frequency before assigning word indexes.
    # 18. batch_words: number of words passed to the worker threads per batch, default 10000.
model.save(word2vec_model_path)
return model
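# Sketch (assumption: same gensim version as above, where the argument is
# still called `size`) spelling out the parameters documented above.  Paths
# reuse the module-level constants; size=64 matches the 64-dimensional vectors
# the rest of this script expects, whereas the function above trains with 100.
def _train_word2vec_example():
    sentences = word2vec.Text8Corpus(word2vec_txt_path)
    model = word2vec.Word2Vec(
        sentences,
        size=64,       # dimensionality of the word vectors
        sg=0,          # 0 = CBOW, 1 = skip-gram
        window=5,      # context window
        min_count=5,   # drop words occurring fewer than 5 times
        workers=4)     # training threads
    model.save(word2vec_model_path)
    return model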
def text_load_word2vec_model(word2vec_model_path):
"""
    Load a trained word2vec model.
    :param word2vec_model_path: model path
    :return: word2vec model
"""
model = word2vec.Word2Vec.load(word2vec_model_path)
return model
def text_get_clear_word2vec_corpus(word2vec_txt_path):
"""
    Build the word2vec training corpus from the raw weibo texts.
    :param word2vec_txt_path: where to save the corpus
    :return: 0
"""
with open(word2vec_txt_path, 'a') as f:
for index, row in df_text.iterrows():
text_content = row['text']
raw_txt = jieba_clear_text("".join(re.findall(u"[\u4e00-\u9fa5]", text_content)))
f.write(raw_txt + "\n")
    logging.info("Finished cleaning the word2vec corpus")
def text_compute_word2vec(text_content):
if pd.isna(text_content):
return np.zeros(64)
raw_txt_list = jieba_clear_text("".join(re.findall(u"[\u4e00-\u9fa5]", text_content))).split(' ')
text_word2vec_score_list = []
for word in raw_txt_list:
try:
            # use this line instead when the model was trained by this script (vectors live under .wv)
# text_word2vec_score_list.append(model_word2vec.wv[word])
text_word2vec_score_list.append(model_word2vec[word])
except KeyError:
text_word2vec_score_list.append(np.zeros(64))
result_mean_array = np.mean(np.array(text_word2vec_score_list),axis=0)
return result_mean_array
def user_data_read():
"""
    Read the user feature file.
    :return: user feature dataframe
"""
df_user = pd.read_csv(user_csv_path)
return df_user
def user_insert_cols(df_user,new_features_list):
"""
    Add new feature columns to the user dataframe so the values can be filled in later.
    :param df_user: user dataframe
    :return: df_user: user dataframe with the new columns
    """
    logging.info("Adding new user feature columns...")
    col_name = list(df_user.columns)
    col_name = col_name + new_features_list
    df_user = df_user.reindex(columns=col_name, fill_value=0)
    logging.info("New user feature columns added")
return df_user
def user_feature_extraction(df_user):
    logging.info("Starting user feature extraction...")
    # convert the follow/fans ratio column to float
    df_user['folfans_ratio'] = df_user['folfans_ratio'].astype(float)
    # remaining statistics
    i = 0
    for index, row in df_user.iterrows():
        logging.info("Progress "+str(i+1)+"/"+str(df_user.shape[0]))
        # get the user's follow and fans counts
        user_follow_count = row['user_follow_count']
        user_fans_count = row['user_fans_count']
        # compute the follow/fans ratio
        df_user.at[i,'folfans_ratio'] = user_compute_folfans_ratio(user_follow_count,user_fans_count)
        i += 1
    logging.info("User feature extraction finished")
return df_user
def user_compute_folfans_ratio(user_follow_count,user_fans_count):
"""
    Compute the follow/fans ratio.
    :param user_follow_count: number of accounts the user follows
    :param user_fans_count: number of fans (followers)
:return:
"""
if( user_fans_count == 0):
return 0
else:
return user_follow_count/user_fans_count
def image_data_read():
"""
    Read the image feature file.
    :return: image feature dataframe
"""
df_image = pd.read_csv(image_csv_path)
return df_image
def image_insert_cols(df_image,new_features_list):
"""
    Add new feature columns to the image dataframe so the values can be filled in later.
    :param df_image: image dataframe
    :return: df_image: image dataframe with the new columns
    """
    logging.info("Adding new image feature columns...")
    col_name = list(df_image.columns)
    # de-duplicate column names before inserting the new ones
    col_name = col_name + sorted(set(new_features_list) - set(col_name), key = new_features_list.index)
    df_image = df_image.reindex(columns=col_name, fill_value=0)
    logging.info("New image feature columns added")
return df_image
def image_feature_extraction(df_image):
    logging.info("Starting image feature extraction...")
    # convert columns three through the last to float
    df_image.iloc[:,-2048:] = df_image.iloc[:,-2048:].astype(float)
    # df_image.iloc[:, -2:] = df_image.iloc[:, -2:].astype(object)
    # return df_image
    # df_image['sim_image_word'] = df_image['sim_image_word'].astype(float)
    # remaining statistics
i = 0
image_name = []
for index, row in df_image.iterrows():
        logging.info("Progress "+str(i+1)+"/"+str(df_image.shape[0]))
        # get the piclist content to process
if (pd.isna(df_image.iloc[i,1])):
i += 1
continue
else:
image_list = row['piclist'].split('\t')
# 计算 颜色矩
# filename1 = 'G:/train/rumor_pic/' + image_list[0]
# filename2 = 'G:/train/truth_pic/' + image_list[0]
filename1 = 'G:/test/rumor_images/' + image_list[0]
filename2 = 'G:/test/nonrumor_images/' + image_list[0]
filename= ''
if (os.path.isfile(filename1)):
filename = filename1
else:
filename = filename2
            # compute the colour moments
            # df_image.at[i, -9:] = image_color_moments(filename)
            # compute deep-learning features --- PyTorch ResNet50 CNN
            try:
                df_image.at[i, -2048:] = image_resnet_cnn(filename,model_resnet50)
            except Exception as e:
                logging.info("Problem with image "+str(e))
# df_image['tf_vgg19_class'] = image_get_class(filename)
# # 获得图片的宽度、高度、k物理大小kb
# df_image.at[i, 'image_width'], df_image.at[i, 'image_height'], df_image.at[i, 'image_kb'] = image_get_width_height_kb(filename)
# #计算图文相似度,当存在多张图片的时候采用第一张图片作为该博文的代表图片
# df_image.at[i, 'sim_image_word'] = image_get_img_word_sim(i, row['tf_vgg19_class'], row['tf_resnet50_class'])
i += 1
    logging.info("Image feature extraction finished")
return df_image
def image_get_img_word_sim(index, vgg19_class_name, resnet50_class_name):
"""
similarity_score = arg max{ log( f_i * c_j * swv(term_i,term_j) ) }
1 ≤ i ≤ n, 1 ≤ j ≤m
    swv(term_i, term_j) is the cosine similarity between the word vectors of term_i and term_j
    f_i is the frequency of the i-th word in the weibo text
    c_j is the classifier confidence for the j-th word (the image class name)
"""
#微博正文
text_content = df_text['text'][index]
if pd.isna(text_content):
return 0
#去除停用词和英文单词并分词为list
list_clear_weibo_text = jieba_clear_text("".join(re.findall(u"[\u4e00-\u9fa5]", text_content))).split(' ')
#获得微博正文的词频
dict_weibo_text = Counter(list_clear_weibo_text)
#获得分类的词向量
try:
#获取单词的词向量
term_vgg19_class_name = model_word2vec[dict_image_class[vgg19_class_name]]
except Exception:
#word2vec中不存在这个词汇,以64位0补充
term_vgg19_class_name = np.zeros(64)
try:
#获取单词的词向量
term_resnet50_class_name = model_word2vec[dict_image_class[resnet50_class_name]]
except Exception:
#word2vec中不存在这个词汇,以64位0补充
term_resnet50_class_name = np.zeros(64)
list_vgg19_sim = []
list_resnet50_sim = []
#遍历微博正文词频表
for(word, frequency) in dict_weibo_text.items():
try:
#获取单词的词向量
term_i = model_word2vec[word]
except Exception:
#word2vec中不存在这个词汇,以64位0补充
term_i = np.zeros(64)
if np.all(term_i == 0):
list_vgg19_sim.append(0)
list_resnet50_sim.append(0)
continue
if np.all(term_vgg19_class_name == 0):
list_vgg19_sim.append(0)
if np.all(term_resnet50_class_name == 0):
list_resnet50_sim.append(0)
if np.all(term_vgg19_class_name != 0):
# 计算余弦相似度
swv_vgg19 = np.dot(term_i, term_vgg19_class_name) / (norm(term_i) * norm(term_vgg19_class_name))
# 计算图文相似度
list_vgg19_sim.append(np.log(1 + frequency * float(list_vgg19_score[index]) * swv_vgg19))
if np.all(term_resnet50_class_name != 0):
#计算余弦相似度
swv_resnet50 = np.dot(term_i, term_resnet50_class_name) / (norm(term_i) * norm(term_resnet50_class_name))
#计算图文相似度
list_resnet50_sim.append(np.log(1 + frequency*float(list_resnet50_score[index])*swv_resnet50))
similarity_score = (max(list_vgg19_sim,default=0) + max(list_resnet50_sim,default=0)) / 2
print(similarity_score)
return similarity_score
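# Worked toy example of the score used above (all numbers invented): a term
# with frequency f, classifier confidence c and cosine similarity swv between
# its vector and the class-name vector contributes log(1 + f * c * swv); the
# function above takes the max over terms and averages the VGG19 and ResNet50
# variants.
def _similarity_score_toy_example():
    f = 2                                   # term frequency in the weibo text
    c = 0.9                                 # classifier confidence for the class name
    term = np.array([1.0, 0.0])
    class_name = np.array([1.0, 1.0])
    swv = np.dot(term, class_name) / (norm(term) * norm(class_name))  # cosine, about 0.707
    return np.log(1 + f * c * swv)          # about 0.82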
def image_get_score_list(image_class_vgg19_score_path, image_class_resnet50_score_path):
#获得vgg19和resnet50分类时的可信度
with open(image_class_vgg19_score_path, "r", encoding='UTF-8') as f1:
str_vgg19_score = f1.read()
#分数以空格分开,将str转成list
list_vgg19_score = str_vgg19_score.split(" ")
with open(image_class_resnet50_score_path, "r", encoding='UTF-8') as f2:
str_resnet50_score = f2.read()
#分数以空格分开,将str转成list
list_resnet50_score = str_resnet50_score.split(" ")
return list_vgg19_score, list_resnet50_score
def image_get_width_height_kb(img_path):
try:
im = Image.open(img_path) # 返回一个Image对象
except:
return 0, 0, 0
fsize = os.path.getsize(img_path)
fsize = fsize / float(1024)
return im.size[0], im.size[1], round(fsize, 2)
def image_color_moments(filename):
"""
    Extract the colour moments of an image.
    :param filename: file path
    :return: color_feature: colour moment feature
"""
img = cv2.imread(filename)
if img is None:
return
    # Convert BGR to HSV colorspace. OpenCV's default colour space is BGR (similar to, but not, RGB).
    # HSV (hue, saturation, value) matches human colour perception better than most other colour spaces.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Split the channels - h,s,v
h, s, v = cv2.split(hsv)
# Initialize the color feature
color_feature = []
# N = h.shape[0] * h.shape[1]
# The first central moment - average 一阶矩(均值)
h_mean = np.mean(h) # np.sum(h)/float(N)
s_mean = np.mean(s) # np.sum(s)/float(N)
v_mean = np.mean(v) # np.sum(v)/float(N)
color_feature.extend([h_mean, s_mean, v_mean])
# The second central moment - standard deviation 二阶矩(方差)
h_std = np.std(h) # np.sqrt(np.mean(abs(h - h.mean())**2))
s_std = np.std(s) # np.sqrt(np.mean(abs(s - s.mean())**2))
v_std = np.std(v) # np.sqrt(np.mean(abs(v - v.mean())**2))
color_feature.extend([h_std, s_std, v_std])
# The third central moment - the third root of the skewness 三阶矩(斜度)
h_skewness = np.mean(abs(h - h.mean())**3)
s_skewness = np.mean(abs(s - s.mean())**3)
v_skewness = np.mean(abs(v - v.mean())**3)
h_thirdMoment = h_skewness**(1./3)
s_thirdMoment = s_skewness**(1./3)
v_thirdMoment = v_skewness**(1./3)
color_feature.extend([h_thirdMoment, s_thirdMoment, v_thirdMoment])
return color_feature
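# Illustrative usage of image_color_moments (the path is a placeholder): the
# returned list holds 9 values - mean, standard deviation and cube-root
# skewness for each of the H, S and V channels.
def _color_moments_demo(image_path='G:/test/rumor_images/example.jpg'):
    feature = image_color_moments(image_path)
    if feature is not None:
        assert len(feature) == 9
    return feature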
class net(nn.Module):
def __init__(self):
super(net, self).__init__()
# resnet50
self.net = models.resnet50(pretrained=True)
def forward(self, input):
output = self.net.conv1(input)
output = self.net.bn1(output)
output = self.net.relu(output)
output = self.net.maxpool(output)
output = self.net.layer1(output)
output = self.net.layer2(output)
output = self.net.layer3(output)
output = self.net.layer4(output)
output = self.net.avgpool(output)
return output
def image_resnet_cnn(img_path, net):
transform = transforms.Compose([
        # resize the image to 256x256
        transforms.Resize((256,256)),
        # centre-crop to 224x224
        transforms.CenterCrop((224,224)),
        # convert the image to a Tensor
        transforms.ToTensor()]
    )
    # read the image and apply the transform
    try:
        img = Image.open(img_path).convert('RGB')  # force 3 channels; grayscale images otherwise break conv1
        img = transform(img)
        logging.info(img.shape)
        x = Variable(torch.unsqueeze(img, dim=0).float(), requires_grad=False)
        logging.info(x.shape)
        # use the GPU if available
        if torch.cuda.is_available():
            x = x.cuda()
            net = net.cuda()
        # move back to CPU, otherwise later steps may fail
y = net(x).cpu()
y = torch.squeeze(y)
cnn_features = y.data.numpy().tolist()
logging.info(y.shape)
return cnn_features
except:
return np.zeros(2048).tolist()
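# Sketch of how image_resnet_cnn is used by image_feature_extraction above:
# one forward pass through the truncated ResNet50 yields a 2048-dimensional
# feature list per image.  The path is a placeholder.
def _resnet_feature_demo(image_path='G:/test/rumor_images/example.jpg'):
    model = net()
    model.eval()
    features = image_resnet_cnn(image_path, model)   # list of 2048 floats
    return len(features)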
def image_get_class(img_path):
img_array = []
for i in img_path:
if (i == 'nothing'):
img_array.append('no')
else:
img = image.load_img(i, target_size=(224, 224))
# 将图片转换为(224,224,3)数组,最后的3是因为RGB三色彩图
img = image.img_to_array(img)
# 跟前面的例子一样,使用模型进行预测是批处理模式,
# 所以对于单个的图片,要扩展一维成为(1,224,224,3)这样的形式
# 相当于建立一个预测队列,但其中只有一张图片
img = np.expand_dims(img, axis=0)
predict_class_vgg = model_tf_vgg19.predict(img)
# 获取图片识别可能性最高的3个结果
desc_vgg = vgg19.decode_predictions(predict_class_vgg, top=1)
# desc = resnet50.decode_predictions(predict_class, top=3)
# 我们的预测队列中只有一张图片,所以结果也只有第一个有效,显示出来
img_array.append(desc_vgg[0][0][1])
print(i)
# 使用模型预测(识别)
return img_array
def translateBaidu(text, f='en', t='zh'):
salt = random.randint(32768, 65536)
sign = appid + text + str(salt) + secretKey
sign = hashlib.md5(sign.encode()).hexdigest()
url = url_baidu + '?appid=' + appid + '&q=' + urllib.parse.quote(text) + '&from=' + f + '&to=' + t + '&salt=' + str(salt) + '&sign=' + sign
response = urllib.request.urlopen(url)
content = response.read().decode('utf-8')
data = json.loads(content)
result = str(data['trans_result'][0]['dst'])
return result
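# Illustrative call to translateBaidu (requires a valid Baidu Translate appid
# and secretKey; the free tier allows roughly one request per second, which is
# why get_cn_json_class below sleeps between calls).
def _translate_demo():
    return translateBaidu('hello world', f='en', t='zh')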
def get_cn_json_class(en_imagenet_class_path, cn_imagenet_class_path):
fn = open(en_imagenet_class_path, "r", encoding='UTF-8')
j = fn.read()
    dic = json.loads(j)  # original English class names
    fn.close()
    txt_dic = {}  # Chinese translations
    for i in range(0, 1000):
        try:
            start = time.time()
            txt_dic[dic[str(i)][1]] = translateBaidu(dic[str(i)][1])
            end = time.time()
            if end - start < 1:
                time.sleep(1)  # API rate limit: at most one call per second
except Exception as e:
print(e)
json_str = json.dumps(txt_dic)
file_object = open(cn_imagenet_class_path, 'w')
file_object.write(json_str)
file_object.close()
def image_get_class_cn_dict(cn_imagenet_class_path):
"""
    Get the Chinese translation dictionary for the image classes.
:param cn_imagenet_class_path:
:return:
"""
fn = open(cn_imagenet_class_path, "r", encoding='UTF-8')
str_json = fn.read()
dic = json.loads(str_json)
fn.close()
return dic
# #*******************文本特征提取开始***************************
# #原始数据的读入
# #df_text,df_user,df_image = train_data_read(train_csv_path)
# start = time.time()
# # 读入停用词表、积极词汇表、消极词汇表
# stopwords = get_stopwords_list()
# possentiwords = get_possentiwords_list()
# negsentiwords = get_negsentiwords_list()
# #文本的读入
# df_text = text_data_read()
# #微博文本扩展特征数据列
# new_text_features_list = ['text_length', 'contains_questmark', 'num_questmarks', 'contains_exclammark',
# 'num_exclammarks', 'contains_hashtag', 'num_hashtags', 'contains_URL',
# 'num_URLs', 'contains_mention', 'num_mentions', 'sentiment_score',
# 'num_noun','num_verb','num_pronoun','num_possentiwords','num_negsentiwords',
# 'contains_firstorderpron','contains_secondorderpron','contains_thirdorderpron']
# # 浪费时间
# for i in range(1,101):
# new_text_features_list.append('word2vec_'+str(i))
# df_text = text_insert_cols(df_text,new_text_features_list)
# #加载sentiment model
# if not os.path.isfile(sentiment_model_path + '.3'):
# # 情感分析语料模型训练
# text_train_sentiment()
# else:
# logging.info("sentiment model is ready!")
# #加载word2vec model
# if not os.path.isfile(word2vec_model_path):
# # 获得词向量训练语料
# text_get_clear_word2vec_corpus(word2vec_txt_path)
# # 训练word2vec模型
# model_word2vec = text_train_word2vec_model(word2vec_txt_path, word2vec_model_path)
# else:
# # 加载word2vec模型
# #model_word2vec = text_load_word2vec_model(word2vec_model_path)
# model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
# remember_delete = 1
# #文本特征提取
# df_text = text_feature_extraction(df_text)
# #文本特征保存
# df_text.to_csv(text_csv_path,index=0)#不保留行索引
# end = time.time()
# logging.info("运行时间:"+str(end-start))
# #*******************文本特征提取结束***************************
# #*******************用户特征提取开始***************************
# start = time.time()
# #原始数据读入
# df_user = user_data_read()
# #用户新特征列扩展
# new_user_features_list = ['folfans_ratio']
# df_user = user_insert_cols(df_user,new_user_features_list)
# #用户特征提取
# df_user = user_feature_extraction(df_user)
# #用户特征保存
# df_user.to_csv(user_csv_path,index=0)#不保留行索引
# end = time.time()
# logging.info("运行时间:"+str(end-start))
# #*******************用户特征提取结束***************************
# #*******************图片特征提取开始***************************
# start = time.time()
# #原始数据读入
# df_image = image_data_read()
# #图片新特征列扩展
# new_image_features_list = ['h_first_moment','s_first_moment','v_first_moment',
# 'h_second_moment','s_second_moment','v_second_moment',
# 'h_third_moment','s_third_moment','v_third_moment',
# 'tf_vgg19_class','tf_resnet50_class','image_width','image_height','image_kb','sim_image_word']
# for i in range(1,2049):
# new_image_features_list.append('resnet_'+str(i))
# df_image = image_insert_cols(df_image,new_image_features_list)
# #pytorch ResNet 50网络
# model_resnet50 = net()
# model_resnet50.eval()
# model_resnet50 = model_resnet50.cuda()
# #tensorflow vgg19和resnet50模型
# model_tf_vgg19 = vgg19.VGG19(weights='imagenet')
# model_tf_resnet50 = resnet50.ResNet50(weights='imagenet')
# model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
# #获得vgg19和resnet50分类的图片top1可信度list
# list_vgg19_score, list_resnet50_score = image_get_score_list(image_class_vgg19_score_path, image_class_resnet50_score_path)
# #获得中文对照词典
# dict_image_class = image_get_class_cn_dict(cn_imagenet_class_path)
# #获得文本特征中的微博原文
# df_text = pd.read_csv(text_csv_path, usecols=['text']) #只加载text列,提升速度,减小不必要的内存损耗
# #图片特征提取
# df_image = image_feature_extraction(df_image)
# #图片特征保存
# df_image.to_csv(image_csv_path,index=0)#不保留行索引
# end = time.time()
# logging.info("运行时间:"+str(end-start))
# #*******************图片特征提取结束***************************
# 2020-02-09 19:30:23,551 : INFO : 图片有问题Given groups=1, weight of size 64 3 7 7, expected input[1, 1, 224, 224] to have 3 channels, but got 1 channels instead
# Loaded runtime CuDNN library: 7.5.1 but source was compiled with: 7.6.5. CuDNN library major and minor version needs to match or have higher minor version in case of CuDNN 7.0 or later version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.
# training and testing with the new dataset
train_rumor_txt_path = r'G:\test\tweets\train_rumor.txt'
train_non_rumor_txt_path = r'G:\test\tweets\train_nonrumor.txt'
test_rumor_txt_path = r'G:\test\tweets\test_rumor.txt'
test_non_rumor_txt_path = r'G:\test\tweets\test_nonrumor.txt'
social_feature_txt_path = r'G:\test\social_feature.txt'
test_csv_path = r"G:/result_origin.csv"
# extract the needed fields from the raw dataset into a csv file
def get_train_csv(rumor_path, label, save_path):
features_list = ['id', 'user_name', 'tweet_url', 'user_url', 'publish_time',
'original', 'retweet_count', 'comment_count', 'praise_count', 'user_id',
'user_authentication_type', 'user_fans_count', 'user_follow_count', 'user_weibo_count', 'publish_platform',
'piclist', 'text', 'label']
write_list = []
with open(rumor_path,'r', encoding='UTF-8') as f:
list_rumor = f.readlines()
i = 1
list_content = []
for line in list_rumor:
if i == 1: # 基础信息列
info_list = line.split('|')
info_list[-1] = info_list[-1].replace('\n','')
list_content.extend(info_list)
info_list.clear()
i += 1
elif i == 2: # 图片列
list_content.append(line.replace('|null\n',''))
i += 1
else: # 微博正文
list_content.append(line.replace('\n','').replace(',',','))
list_content.append(str(label)+'\n')
write_list.append(','.join(list_content))
list_content.clear()
i = 1
with open(save_path, 'w+', encoding='UTF-8') as fwrite:
fwrite.write(','.join(features_list)+'\n')
fwrite.writelines(write_list)
# get_train_csv(test_rumor_txt_path, 1, r"G:/test_data.csv")
# drop the columns the new-dataset csv does not need and rename the rest to match the original dataset
def polish_test_csv():
# 删除多余特征
# drop_list = ['id', 'user_name', 'tweet_url', 'user_url', 'publish_time', 'original','retweet_count','comment_count','praise_count','user_id','user_authentication_type','publish_platform','people','location','organization','words']
# df = pd.read_csv(r"G:/result_test.csv")
# df.drop(drop_list, axis=1, inplace=True)
# print(df.shape)
# df.to_csv(r"G:/result_origin.csv",index=0)#不保留行索引
df = pd.read_csv(r"G:/result_origin.csv")
# 处理piclist列,使其保持和原始train.csv数据列一样
i = 0
for index, row in df.iterrows():
# 获得piclist列
if not pd.isna(row['piclist']):
new_content_list = []
pic_list = row['piclist'].split('|')
for item in pic_list:
new_content_list.append(item.split('/')[-1])
df.at[i, 'piclist'] = '\t'.join(new_content_list)
new_content_list.clear()
i += 1
df.to_csv(r"G:/result_origin.csv",index=0)#不保留行索引
# merge the social features with the test-set csv
def new_csv_fusion():
df1 = pd.read_csv(r"G:/test_data.csv")
df2 = pd.read_csv(r"G:/social_feature.csv")
result = pd.merge(df1, df2, on="id")
result.to_csv(r"G:/result_test.csv", index=0) # 不保留行索引
print(df1.shape)
print(result)
print(result.shape)
# fill in the text features
def get_text_feature():
# 原始数据的读入
df_text = pd.read_csv(test_csv_path)
start = time.time()
# 读入停用词表、积极词汇表、消极词汇表
stopwords = get_stopwords_list()
possentiwords = get_possentiwords_list()
negsentiwords = get_negsentiwords_list()
# 微博文本扩展特征数据列
new_text_features_list = ['contains_questmark', 'contains_exclammark', 'contains_hashtag', 'contains_URL',
'contains_mention', 'num_noun', 'num_verb', 'num_pronoun', 'category']
# 浪费时间
for i in range(1, 65):
new_text_features_list.append('word2vec_' + str(i))
df_text = text_insert_cols(df_text, new_text_features_list)
# # 加载word2vec model
# if not os.path.isfile(word2vec_model_path):
# # 获得词向量训练语料
# text_get_clear_word2vec_corpus(word2vec_txt_path)
# # 训练word2vec模型
# model_word2vec = text_train_word2vec_model(word2vec_txt_path, word2vec_model_path)
# else:
# # 加载word2vec模型
# # model_word2vec = text_load_word2vec_model(word2vec_model_path)
# model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(
# r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
# remember_delete = 1
# 文本特征提取
# df_text = text_feature_extraction(df_text)
# 文本特征保存
df_text.to_csv(test_csv_path, index=0) # 不保留行索引
end = time.time()
logging.info("运行时间:" + str(end - start))
# fill in the user features
def get_user_feature():
start = time.time()
# 原始数据读入
df_user = pd.read_csv(test_csv_path)
# 用户新特征列扩展
new_user_features_list = ['folfans_ratio', 'user_gender', 'user_location', 'user_description']
df_user = user_insert_cols(df_user, new_user_features_list)
# 用户特征提取
df_user = user_feature_extraction(df_user)
# 用户特征保存
df_user.to_csv(test_csv_path, index=0) # 不保留行索引
end = time.time()
logging.info("运行时间:" + str(end - start))
# fill in the image features
def get_image_feature(test_csv_path):
start = time.time()
# 原始数据读入
df_image = pd.read_csv(test_csv_path)
stopwords = get_stopwords_list()
# 图片新特征列扩展
# new_image_features_list = ['h_first_moment','s_first_moment','v_first_moment',
# 'h_second_moment','s_second_moment','v_second_moment',
# 'h_third_moment','s_third_moment','v_third_moment',
# 'tf_vgg19_class','tf_resnet50_class','image_width','image_height','image_kb','sim_image_word']
new_image_features_list = ['sim_image_word']
# for i in range(1,2049):
# new_image_features_list.append('resnet_'+str(i))
df_image = image_insert_cols(df_image, new_image_features_list)
# pytorch ResNet 50网络
# model_resnet50 = net()
# model_resnet50.eval()
# model_resnet50 = model_resnet50.cuda()
# tensorflow vgg19和resnet50模型
# model_tf_vgg19 = vgg19.VGG19(weights='imagenet')
# model_tf_resnet50 = resnet50.ResNet50(weights='imagenet')
model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(
r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
# 获得vgg19和resnet50分类的图片top1可信度list
list_vgg19_score, list_resnet50_score = image_get_score_list(r'G:\test_image_class_vgg19.txt',
r'G:\test_image_class_resnet50.txt')
# 获得中文对照词典
dict_image_class = image_get_class_cn_dict(cn_imagenet_class_path)
# 获得文本特征中的微博原文
df_text = pd.read_csv(test_csv_path, usecols=['text']) # 只加载text列,提升速度,减小不必要的内存损耗
# 图片特征提取
df_image = image_feature_extraction(df_image)
# 图片特征保存
df_image.to_csv(test_csv_path, index=0) # 不保留行索引
end = time.time()
logging.info("运行时间:" + str(end - start))
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
fusion_csv_path = r'G:\毕设\数据集\微博\fusion_news_features.csv'
fusion_csv_path_0404_origin = r'G:\毕设\数据集\微博\fusion_news_features_0404_origin.csv'
fusion_csv_path_0404 = r'G:\毕设\数据集\微博\fusion_news_features_0404.csv'
start = time.time()
# 原始数据读入
df_image = pd.read_csv(fusion_csv_path_0404_origin)
import os
from typing import Dict, Optional, Sequence
import pandas as pd
from pandas.testing import assert_series_equal
from finstmt import FinancialStatements
from tests.conftest import DEVELOPMENT_MODE, GENERATED_PATH, EXPECT_STATEMENTS_PATH
# Imported for test development purposes
from tests.expectdata.statements.load_capiq_cat_annual import LOAD_CAPIQ_CAT_A_INDEX_DATA_DICT
if DEVELOPMENT_MODE:
from tests.utils.gen.data_load import print_test_data_def, get_keys_for_inc_data_items, get_keys_for_bs_data_items
inc_keys = get_keys_for_inc_data_items()
bs_keys = get_keys_for_bs_data_items()
def check_data_items(stmts: FinancialStatements, data_dict: Dict[str, pd.Series],
ignore_keys: Optional[Sequence[str]] = None):
if ignore_keys is None:
ignore_keys = []
for item_key, item_values in data_dict.items():
if item_key in ignore_keys:
continue
item = getattr(stmts, item_key)
assert_series_equal(item, item_values, check_dtype=False)
class LoadTest:
name: str
a_test_data_dict: Dict[str, pd.Series]
q_test_data_dict: Dict[str, pd.Series]
def test_annual(self, stmts: FinancialStatements, data: Optional[Dict[str, pd.Series]] = None,
name: Optional[str] = None, ignore_keys: Optional[Sequence[str]] = None):
if data is None:
data = self.a_test_data_dict
if name is None:
name = self.name
if DEVELOPMENT_MODE:
out_path = os.path.join(EXPECT_STATEMENTS_PATH, f'{name}_annual.py')
with open(out_path, 'w') as f:
f.write('import pandas as pd\n\n')
f.write(
print_test_data_def(
stmts, inc_keys + bs_keys,
f'{name.upper()}_A_INDEX',
f'{name.upper()}_A_INDEX_DATA_DICT',
disp=False
)
)
else:
check_data_items(stmts, data, ignore_keys=ignore_keys)
def test_quarterly(self, stmts: FinancialStatements, data: Optional[Dict[str, pd.Series]] = None,
name: Optional[str] = None, ignore_keys: Optional[Sequence[str]] = None):
if data is None:
data = self.q_test_data_dict
if name is None:
name = self.name
if DEVELOPMENT_MODE:
out_path = os.path.join(EXPECT_STATEMENTS_PATH, f'{name}_quarterly.py')
with open(out_path, 'w') as f:
f.write('import pandas as pd\n\n')
f.write(
print_test_data_def(
stmts, inc_keys + bs_keys,
f'{name.upper()}_Q_INDEX',
f'{name.upper()}_Q_INDEX_DATA_DICT',
disp=False
)
)
else:
check_data_items(stmts, data, ignore_keys=ignore_keys)
class TestLoadStockrowCAT(LoadTest):
name = 'load_stockrow_cat'
a_index_str = ["2009-12-31 00:00:00", "2010-12-31 00:00:00", "2011-12-31 00:00:00", "2012-12-31 00:00:00",
"2013-12-31 00:00:00", "2014-12-31 00:00:00", "2015-12-31 00:00:00", "2016-12-31 00:00:00",
"2017-12-31 00:00:00", "2018-12-31 00:00:00"]
a_index = [ | pd.to_datetime(val) | pandas.to_datetime |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
"Codigo que permite la porderación de la nubosidad por la ponderación de sus horas"
## ----------------------LECTURA DE DATOS DE GOES CH02----------------------- ##
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C2_2019_0320_0822.nc')
## ----------------- INCORPORATING THE RADIATION AND EXPERIMENT DATA ----------------- ##
df_P975 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel975.txt', sep=',', index_col =0)
df_P350 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel350.txt', sep=',', index_col =0)
df_P348 = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Experimentos_Panel/Panel348.txt', sep=',', index_col =0)
df_P975['Fecha_hora'] = df_P975.index
df_P350['Fecha_hora'] = df_P350.index
df_P348['Fecha_hora'] = df_P348.index
df_P975.index = pd.to_datetime(df_P975.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P350.index = pd.to_datetime(df_P350.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
df_P348.index = pd.to_datetime(df_P348.index, format="%Y-%m-%d %H:%M:%S", errors='coerce')
## ---------------- RESTRICTING THE DATA TO VALID VALUES ---------------- ##
'Since radiation is what matters here, the data are filtered by keeping'
'power values greater than or equal to 0, which appear to be generated one'
'hour after radiation starts to arrive.'
df_P975 = df_P975[(df_P975['radiacion'] > 0) & (df_P975['strength'] >=0) & (df_P975['NI'] >=0)]
df_P350 = df_P350[(df_P350['radiacion'] > 0) & (df_P350['strength'] >=0) & (df_P350['NI'] >=0)]
df_P348 = df_P348[(df_P348['radiacion'] > 0) & (df_P348['strength'] >=0) & (df_P348['NI'] >=0)]
df_P975_h = df_P975.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P350_h = df_P350.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P348_h = df_P348.groupby(pd.Grouper(level='fecha_hora', freq='1H')).mean()
df_P975_h = df_P975_h.between_time('06:00', '17:00')
df_P350_h = df_P350_h.between_time('06:00', '17:00')
df_P348_h = df_P348_h.between_time('06:00', '17:00')
#-----------------------------------------------------------------------------
# Paths for the fonts ---------------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
## -------------------------------------------------------------------------- ##
Umbral_up_348 = 46.26875
Umbral_down_348 = 22.19776
Umbrales_348 = [Umbral_down_348, Umbral_up_348]
Umbral_up_350 = 49.4412
Umbral_down_350 = 26.4400
Umbrales_350 = [Umbral_down_350, Umbral_up_350]
Umbral_up_975 = 49.4867
Umbral_down_975 = 17.3913
Umbrales_975 = [Umbral_down_975, Umbral_up_975]
lat = ds.variables['lat'][:, :]
lon = ds.variables['lon'][:, :]
Rad = ds.variables['Radiancias'][:, :, :]
## -- Get the time for each value
tiempo = ds.variables['time']
fechas_horas = nc.num2date(tiempo[:], units=tiempo.units)
for i in range(len(fechas_horas)):
fechas_horas[i] = fechas_horas[i].strftime('%Y-%m-%d %H:%M')
## -- Select the TS pixel and create the DataFrame
lat_index_975 = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index_975 = np.where((lon[0, :] < -75.58) & (lon[0, :] > -75.59))[0][0]
Rad_pixel_975 = Rad[:, lat_index_975, lon_index_975]
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
## -- Select the CI pixel
lat_index_350 = np.where((lat[:, 0] > 6.16) & (lat[:, 0] < 6.17))[0][0]
lon_index_350 = np.where((lon[0, :] < -75.64) & (lon[0, :] > -75.65))[0][0]
Rad_pixel_350 = Rad[:, lat_index_350, lon_index_350]
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
## -- Select the JV pixel
lat_index_348 = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index_348 = np.where((lon[0, :] < -75.54) & (lon[0, :] > -75.55))[0][0]
Rad_pixel_348 = Rad[:, lat_index_348, lon_index_348]
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_348.index = Rad_df_348['Fecha_Hora']
Rad_df_348 = Rad_df_348.drop(['Fecha_Hora'], axis=1)
'NOTE: FROM HERE ON ----------------------------------------------------------------------------------------'
'The hourly averaging below is commented out because smoothing the series was losing the value of the 10-minute data.'
## ------------------------ REPLACING THE HOURLY DATA WITH THE ORIGINAL DATA ---------------------- ##
Rad_df_348_h = Rad_df_348
Rad_df_350_h = Rad_df_350
Rad_df_975_h = Rad_df_975
## ------------------------------------ HOURLY REFLECTANCE DATA ------------------------- ##
# Rad_df_348_h = Rad_df_348.groupby(pd.Grouper(freq="H")).mean()
# Rad_df_350_h = Rad_df_350.groupby(pd.Grouper(freq="H")).mean()
# Rad_df_975_h = Rad_df_975.groupby(pd.Grouper(freq="H")).mean()
'NOTE: UP TO HERE -----------------------------------------------------------------------------------------'
Rad_df_348_h = Rad_df_348_h.between_time('06:00', '17:00')
Rad_df_350_h = Rad_df_350_h.between_time('06:00', '17:00')
Rad_df_975_h = Rad_df_975_h.between_time('06:00', '17:00')
## -------------------------------------- PDF AS AN NP.ARRAY ------------------------------ ##
Hist_348 = np.histogram(Rad_df_348_h['Radiacias'].values[~np.isnan(Rad_df_348_h['Radiacias'].values)])
Hist_350 = np.histogram(Rad_df_350_h['Radiacias'].values[~np.isnan(Rad_df_350_h['Radiacias'].values)])
Hist_975 = np.histogram(Rad_df_975_h['Radiacias'].values[~np.isnan(Rad_df_975_h['Radiacias'].values)])
## --------------------------------- PDF AS A PLOT ----------------------------------------- ##
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Rad_df_348_h['Radiacias'].values[~np.isnan(Rad_df_348_h['Radiacias'].values)], bins='auto', alpha = 0.5)
Umbrales_line1 = [ax1.axvline(x=xc, color='k', linestyle='--') for xc in Umbrales_348]
ax1.set_title(u'Distribution of the FR at JV', fontproperties=prop, fontsize = 13)
ax1.set_ylabel(u'Frequency', fontproperties=prop_1)
ax1.set_xlabel(u'Reflectance', fontproperties=prop_1)
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Rad_df_350_h['Radiacias'].values[~np.isnan(Rad_df_350_h['Radiacias'].values)], bins='auto', alpha = 0.5)
Umbrales_line2 = [ax2.axvline(x=xc, color='k', linestyle='--') for xc in Umbrales_350]
ax2.set_title(u'Distribution of the FR at CI', fontproperties=prop, fontsize = 13)
ax2.set_ylabel(u'Frequency', fontproperties=prop_1)
ax2.set_xlabel(u'Reflectance', fontproperties=prop_1)
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Rad_df_975_h['Radiacias'].values[~np.isnan(Rad_df_975_h['Radiacias'].values)], bins='auto', alpha = 0.5)
Umbrales_line3 = [ax3.axvline(x=xc, color='k', linestyle='--') for xc in Umbrales_975]
ax3.set_title(u'Distribution of the FR at TS', fontproperties=prop, fontsize = 13)
ax3.set_ylabel(u'Frequency', fontproperties=prop_1)
ax3.set_xlabel(u'Reflectance', fontproperties=prop_1)
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoFRUmbral.png')
plt.show()
## ------------------------- GET THE DF FOR THE CLEAR-SKY SCENARIO ---------------------------- ##
df_348_desp = Rad_df_348_h[Rad_df_348_h['Radiacias'] < Umbral_down_348]
df_350_desp = Rad_df_350_h[Rad_df_350_h['Radiacias'] < Umbral_down_350]
df_975_desp = Rad_df_975_h[Rad_df_975_h['Radiacias'] < Umbral_down_975]
## -------------------------- GET THE DF FOR THE CLOUDY SCENARIO ------------------------------ ##
df_348_nuba = Rad_df_348_h[Rad_df_348_h['Radiacias'] > Umbral_up_348]
df_350_nuba = Rad_df_350_h[Rad_df_350_h['Radiacias'] > Umbral_up_350]
df_975_nuba = Rad_df_975_h[Rad_df_975_h['Radiacias'] > Umbral_up_975]
## ------------------------- GET THE CLEAR-SKY HOURS AND DATES ---------------------------- ##
Hora_desp_348 = df_348_desp.index.hour
Fecha_desp_348 = df_348_desp.index.date
Hora_desp_350 = df_350_desp.index.hour
Fecha_desp_350 = df_350_desp.index.date
Hora_desp_975 = df_975_desp.index.hour
Fecha_desp_975 = df_975_desp.index.date
## ---------------------------- GET THE CLOUDY HOURS AND DATES ---------------------------- ##
Hora_nuba_348 = df_348_nuba.index.hour
Fecha_nuba_348 = df_348_nuba.index.date
Hora_nuba_350 = df_350_nuba.index.hour
Fecha_nuba_350 = df_350_nuba.index.date
Hora_nuba_975 = df_975_nuba.index.hour
Fecha_nuba_975 = df_975_nuba.index.date
## ----------------------------- PLOT THE HISTOGRAMS OF THE HOURS ----------------------------- #
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hora_desp_348, bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax1.hist(Hora_nuba_348, bins='auto', alpha = 0.5, label = 'Nub')
ax1.set_title(u'Distribution of clouds by hour at JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frequency', fontproperties=prop_1)
ax1.set_xlabel(u'Hours', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hora_desp_350, bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax2.hist(Hora_nuba_350, bins='auto', alpha = 0.5, label = 'Nub')
ax2.set_title(u'Distribution of clouds by hour at CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frequency', fontproperties=prop_1)
ax2.set_xlabel(u'Hours', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hora_desp_975, bins='auto', alpha = 0.5, color = 'orange', label = 'Desp')
ax3.hist(Hora_nuba_975, bins='auto', alpha = 0.5, label = 'Nub')
ax3.set_title(u'Distribution of clouds by hour at TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frequency', fontproperties=prop_1)
ax3.set_xlabel(u'Hours', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoNubaDesp.png')
plt.show()
##---------- FINDING THE RADIATION VALUES CORRESPONDING TO THE CLOUDY HOURS ----------##
df_FH_nuba_348 = pd.DataFrame()
df_FH_nuba_348 ['Fechas'] = Fecha_nuba_348
df_FH_nuba_348 ['Horas'] = Hora_nuba_348
df_FH_nuba_350 = pd.DataFrame()
df_FH_nuba_350 ['Fechas'] = Fecha_nuba_350
df_FH_nuba_350 ['Horas'] = Hora_nuba_350
df_FH_nuba_975 = pd.DataFrame()
df_FH_nuba_975 ['Fechas'] = Fecha_nuba_975
df_FH_nuba_975 ['Horas'] = Hora_nuba_975
df_FH_nuba_348_groupH = df_FH_nuba_348.groupby('Horas')['Fechas'].unique()
df_nuba_348_groupH = pd.DataFrame(df_FH_nuba_348_groupH[df_FH_nuba_348_groupH.apply(lambda x: len(x)>1)]) ## keep only the hours that occur on more than one date
df_FH_nuba_350_groupH = df_FH_nuba_350.groupby('Horas')['Fechas'].unique()
df_nuba_350_groupH = pd.DataFrame(df_FH_nuba_350_groupH[df_FH_nuba_350_groupH.apply(lambda x: len(x)>1)])
df_FH_nuba_975_groupH = df_FH_nuba_975.groupby('Horas')['Fechas'].unique()
df_nuba_975_groupH = pd.DataFrame(df_FH_nuba_975_groupH[df_FH_nuba_975_groupH.apply(lambda x: len(x)>1)])
c = np.arange(6, 18, 1)
Sk_Nuba_stat_975 = {}
Sk_Nuba_pvalue_975 = {}
Composites_Nuba_975 = {}
for i in df_FH_nuba_975_groupH.index:
H = str(i)
if len(df_FH_nuba_975_groupH.loc[i]) == 1 :
list = df_P975_h[df_P975_h.index.date == df_FH_nuba_975_groupH.loc[i][0]]['radiacion'].values
list_sk_stat = np.ones(12)*np.nan
list_sk_pvalue = np.ones(12)*np.nan
elif len(df_FH_nuba_975_groupH.loc[i]) > 1 :
temporal = pd.DataFrame()
for j in range(len(df_FH_nuba_975_groupH.loc[i])):
temporal = temporal.append(pd.DataFrame(df_P975_h[df_P975_h.index.date == df_FH_nuba_975_groupH.loc[i][j]]['radiacion']))
stat_975 = []
pvalue_975 = []
for k in c:
temporal_sk = temporal[temporal.index.hour == k].radiacion.values
Rad_sk = df_P975_h['radiacion'][df_P975_h.index.hour == k].values
try:
SK = ks_2samp(temporal_sk,Rad_sk)
stat_975.append(SK[0])
pvalue_975.append(SK[1])
except ValueError:
stat_975.append(np.nan)
pvalue_975.append(np.nan)
temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
list = temporal_CD['radiacion'].values
list_sk_stat = stat_975
list_sk_pvalue = pvalue_975
Composites_Nuba_975[H] = list
Sk_Nuba_stat_975 [H] = list_sk_stat
Sk_Nuba_pvalue_975 [H] = list_sk_pvalue
del H
Comp_Nuba_975_df = pd.DataFrame(Composites_Nuba_975, index = c)
Sk_Nuba_stat_975_df = pd.DataFrame(Sk_Nuba_stat_975, index = c)
Sk_Nuba_pvalue_975_df = pd.DataFrame(Sk_Nuba_pvalue_975, index = c)
Sk_Nuba_stat_350 = {}
Sk_Nuba_pvalue_350 = {}
Composites_Nuba_350 = {}
for i in df_FH_nuba_350_groupH.index:
H = str(i)
if len(df_FH_nuba_350_groupH.loc[i]) == 1 :
list = df_P350_h[df_P350_h.index.date == df_FH_nuba_350_groupH.loc[i][0]]['radiacion'].values
list_sk_stat = np.ones(12)*np.nan
list_sk_pvalue = np.ones(12)*np.nan
elif len(df_FH_nuba_350_groupH.loc[i]) > 1 :
temporal = pd.DataFrame()
for j in range(len(df_FH_nuba_350_groupH.loc[i])):
temporal = temporal.append(pd.DataFrame(df_P350_h[df_P350_h.index.date == df_FH_nuba_350_groupH.loc[i][j]]['radiacion']))
stat_350 = []
pvalue_350 = []
for k in c:
temporal_sk = temporal[temporal.index.hour == k].radiacion.values
Rad_sk = df_P350_h['radiacion'][df_P350_h.index.hour == k].values
try:
SK = ks_2samp(temporal_sk,Rad_sk)
stat_350.append(SK[0])
pvalue_350.append(SK[1])
except ValueError:
stat_350.append(np.nan)
pvalue_350.append(np.nan)
temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
list = temporal_CD['radiacion'].values
list_sk_stat = stat_350
list_sk_pvalue = pvalue_350
Composites_Nuba_350[H] = list
Sk_Nuba_stat_350 [H] = list_sk_stat
Sk_Nuba_pvalue_350 [H] = list_sk_pvalue
del H
Comp_Nuba_350_df = pd.DataFrame(Composites_Nuba_350, index = c)
Sk_Nuba_stat_350_df = pd.DataFrame(Sk_Nuba_stat_350, index = c)
Sk_Nuba_pvalue_350_df = pd.DataFrame(Sk_Nuba_pvalue_350, index = c)
Sk_Nuba_stat_348 = {}
Sk_Nuba_pvalue_348 = {}
Composites_Nuba_348 = {}
for i in df_FH_nuba_348_groupH.index:
H = str(i)
if len(df_FH_nuba_348_groupH.loc[i]) == 1 :
list = df_P348_h[df_P348_h.index.date == df_FH_nuba_348_groupH.loc[i][0]]['radiacion'].values
list_sk_stat = np.ones(12)*np.nan
list_sk_pvalue = np.ones(12)*np.nan
elif len(df_FH_nuba_348_groupH.loc[i]) > 1 :
temporal = pd.DataFrame()
for j in range(len(df_FH_nuba_348_groupH.loc[i])):
temporal = temporal.append(pd.DataFrame(df_P348_h[df_P348_h.index.date == df_FH_nuba_348_groupH.loc[i][j]]['radiacion']))
stat_348 = []
pvalue_348 = []
for k in c:
temporal_sk = temporal[temporal.index.hour == k].radiacion.values
Rad_sk = df_P348_h['radiacion'][df_P348_h.index.hour == k].values
try:
SK = ks_2samp(temporal_sk,Rad_sk)
stat_348.append(SK[0])
pvalue_348.append(SK[1])
except ValueError:
stat_348.append(np.nan)
pvalue_348.append(np.nan)
temporal_CD = temporal.groupby(by=[temporal.index.hour]).mean()
list = temporal_CD['radiacion'].values
list_sk_stat = stat_348
list_sk_pvalue = pvalue_348
Composites_Nuba_348[H] = list
Sk_Nuba_stat_348 [H] = list_sk_stat
Sk_Nuba_pvalue_348 [H] = list_sk_pvalue
del H
Comp_Nuba_348_df = pd.DataFrame(Composites_Nuba_348, index = c)
Sk_Nuba_stat_348_df = pd.DataFrame(Sk_Nuba_stat_348, index = c)
Sk_Nuba_pvalue_348_df = pd.DataFrame(Sk_Nuba_pvalue_348, index = c)
##---------- FINDING THE RADIATION VALUES CORRESPONDING TO THE CLEAR-SKY HOURS ----------##
df_FH_desp_348 = pd.DataFrame()
df_FH_desp_348 ['Fechas'] = Fecha_desp_348
df_FH_desp_348 ['Horas'] = Hora_desp_348
df_FH_desp_350 = pd.DataFrame()
df_FH_desp_350 ['Fechas'] = Fecha_desp_350
df_FH_desp_350 ['Horas'] = Hora_desp_350
df_FH_desp_975 = pd.DataFrame()
df_FH_desp_975 ['Fechas'] = Fecha_desp_975
df_FH_desp_975 ['Horas'] = Hora_desp_975
df_FH_desp_348_groupH = df_FH_desp_348.groupby('Horas')['Fechas'].unique()
df_desp_348_groupH = pd.DataFrame(df_FH_desp_348_groupH[df_FH_desp_348_groupH.apply(lambda x: len(x)>1)]) ## keep only the hours that occur on more than one date
df_FH_desp_350_groupH = df_FH_desp_350.groupby('Horas')['Fechas'].unique()
df_desp_350_groupH = pd.DataFrame(df_FH_desp_350_groupH[df_FH_desp_350_groupH.apply(lambda x: len(x)>1)])
df_FH_desp_975_groupH = df_FH_desp_975.groupby('Horas')['Fechas'].unique()
df_desp_975_groupH = pd.DataFrame(df_FH_desp_975_groupH[df_FH_desp_975_groupH.apply(lambda x: len(x)>1)])
Sk_Desp_stat_975 = {}
Sk_Desp_pvalue_975 = {}
Composites_Desp_975 = {}
for i in df_FH_desp_975_groupH.index:
H = str(i)
if len(df_FH_desp_975_groupH.loc[i]) == 1 :
list = df_P975_h[df_P975_h.index.date == df_FH_desp_975_groupH.loc[i][0]]['radiacion'].values
list_sk_stat = np.ones(12)*np.nan
list_sk_pvalue = np.ones(12)*np.nan
elif len(df_FH_desp_975_groupH.loc[i]) > 1 :
temporal = | pd.DataFrame() | pandas.DataFrame |
#!/bin/python
from baseball_scraper import baseball_reference, espn, fangraphs
from baseball_id import Lookup
from yahoo_fantasy_bot import utils, source
import pandas as pd
import numpy as np
import datetime
import logging
logger = logging.getLogger()
class Builder:
"""Class that constructs prediction datasets for hitters and pitchers.
The datasets it generates are fully populated with projected stats. The
projection stats are scraped from fangraphs.com.
:param lg: Yahoo! league
:type lg: yahoo_fantasy_api.league.League
:param cfg: config details
:type cfg: ConfigParser
:param csv_details: Details about projections, stored in csv format
:type csv_details: dict
:param ts: Scraper to use to pull team data from baseball_reference.com
:type ts: baseball_reference.TeamScraper
:param es: Scraper to use to pull probable starters from espn
:type es: espn.ProbableStartersScraper
:param tss: Scraper to use to pull team list data from baseball_reference
:type tss: baseball_reference.TeamSummaryScraper
"""
def __init__(self, lg, cfg, csv_details, ts, es, tss):
hitters = source.read_csv(csv_details['hitters'])
pitchers = source.read_csv(csv_details['pitchers'])
self.ppool = pd.concat([hitters, pitchers], sort=True)
self.id_lookup = Lookup
self.use_weekly_schedule = \
cfg['Scorer'].getboolean('useWeeklySchedule')
self.source = cfg['Prediction']['source']
self.ts = ts
self.es = es
self.tss = tss
self.join_col_csv = cfg['Prediction']['join_column_csv']
self.join_col_id_lookup = cfg['Prediction']['join_column_id_lookup']
if lg.settings()['weekly_deadline'] != '1':
raise RuntimeError("This bot only supports weekly lineups.")
# In the preseason the edit date will be the next day. Only once the
# season starts does the edit date advance to the start of the next
# week.
if lg.current_week() == 1:
self.wk_start_date, self.wk_end_date = lg.week_date_range(1)
else:
self.wk_start_date = lg.edit_date()
assert(self.wk_start_date.weekday() == 0)
self.wk_end_date = self.wk_start_date + datetime.timedelta(days=6)
self.season_end_date = datetime.date(self.wk_end_date.year, 12, 31)
def __getstate__(self):
return (self.ppool, self.ts, self.es, self.tss, self.join_col_csv,
self.join_col_id_lookup, self.wk_start_date,
self.wk_end_date, self.season_end_date,
self.use_weekly_schedule, self.source)
def __setstate__(self, state):
self.id_lookup = Lookup
(self.ppool, self.ts, self.es, self.tss, self.join_col_csv,
self.join_col_id_lookup, self.wk_start_date, self.wk_end_date,
self.season_end_date, self.use_weekly_schedule, self.source) = state
def set_id_lookup(self, lk):
self.id_lookup = lk
def select_players(self, plyrs):
"""Return players from the player pool that match the given Yahoo! IDs
:param plyrs: List of dicts that contain the player name and their
Yahoo! ID. These are all of the players we will return.
:return: List of players from the player pool
"""
if self.source.startswith("yahoo"):
for plyr in plyrs:
stats = self.ppool[self.ppool['player_id'] == plyr['player_id']].to_dict('records')[0]
dict = {**stats, **plyr}
yield | pd.Series(dict) | pandas.Series |
# Common library routines for the BCycle analysis
import pandas as pd
import numpy as np
INPUT_DIR = '../input'
def load_bikes(file=INPUT_DIR + '/bikes.csv'):
'''
Load the bikes CSV file, converting column types
INPUT: Filename to read (defaults to `../input/bikes.csv`)
RETURNS: Pandas dataframe containing bikes information
'''
try:
bikes_df = pd.read_csv(file,
dtype={'station_id' : np.int8,
'bikes' : np.int8,
'docks' : np.int8}
)
bikes_df['datetime'] = pd.to_datetime(bikes_df['datetime'], format='%Y-%m-%d %H:%M:%S')
return bikes_df
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
def load_stations(file=INPUT_DIR + '/stations.csv'):
'''
Load the stations CSV file, converting column types
INPUT: Filename to read (defaults to `../input/stations.csv`)
RETURNS: Pandas dataframe containing stations information
'''
try:
stations_df = pd.read_csv(file,
dtype={'station_id' : np.int8,
'lat' : np.float32,
'lon' : np.float32}
)
stations_df['datetime'] = pd.to_datetime(stations_df['datetime'], format='%Y-%m-%d %H:%M:%S')
return stations_df
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
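# Hedged usage sketch (not part of the original module): join the two loaders above so every
# bike-availability record carries its station coordinates. Column names follow the dtype
# dictionaries in load_bikes()/load_stations(); everything else is illustrative.
def example_bikes_with_locations():
    bikes_df = load_bikes()
    stations_df = load_stations()
    if bikes_df is None or stations_df is None:
        return None
    # Left-join station coordinates onto the bike snapshots by station_id
    return bikes_df.merge(stations_df[['station_id', 'lat', 'lon']], on='station_id', how='left')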
def load_weather(file=INPUT_DIR + '/weather.csv'):
'''Loads the weather CSV and converts types'''
try:
df = pd.read_csv(file)
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
# Remove whitespace and keep min/max values
df.columns = [col.strip() for col in df.columns]
df = df[['CDT','Max TemperatureF','Min TemperatureF',
'Max Humidity', 'Min Humidity',
'Max Sea Level PressureIn', 'Min Sea Level PressureIn',
'Max Wind SpeedMPH', 'Mean Wind SpeedMPH', 'Max Gust SpeedMPH',
'PrecipitationIn', 'CloudCover', 'Events']]
# Clean up column names, drop means as they're a linear combination of max/min
df.columns = ['date', 'max_temp', 'min_temp', 'max_humidity', 'min_humidity',
'max_pressure', 'min_pressure', 'max_wind', 'min_wind', 'max_gust',
'precipitation', 'cloud_cover', 'events']
# Convert column types appropriately
df['date'] = | pd.to_datetime(df['date'], format='%Y-%m-%d') | pandas.to_datetime |
# We solve the task packaging problem as a bin packing problem.
# Each day is a bin, and all the bins and their man-hour limits can be computed a priori.
# For A checks this is trivial: we keep assigning tasks until the limits are reached.
# For C checks, when a C check starts, bins are created for each day and then ordered;
# the ordering rule is: fill the bins with the fewest aircraft assigned first
# (a minimal first-fit sketch follows the imports below).
import numpy as np
import pandas as pd
from copy import deepcopy
from datetime import datetime, date
from collections import OrderedDict
from tqdm import tqdm
from tr.core.utils import advance_date, save_pickle, load_pickle
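# Minimal first-fit sketch of the bin-packing idea described above (illustrative only:
# the real day-bins, task durations and the C-check ordering rule are built later in
# this module from the fleet schedule).
def first_fit_assign(tasks, bin_capacity_hours):
    """Assign (task_id, man_hours) pairs to consecutive day-bins, first fit."""
    bins = [{'remaining': bin_capacity_hours, 'tasks': []}]
    for task_id, hours in tasks:
        for day_bin in bins:
            if day_bin['remaining'] >= hours:
                day_bin['tasks'].append(task_id)
                day_bin['remaining'] -= hours
                break
        else:  # no open day has room: start a new day-bin
            bins.append({'remaining': bin_capacity_hours - hours, 'tasks': [task_id]})
    return bins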
def datetime_to_integer(dt_time):
# TODO: SOLVE FOR OLD DATA FIRST
if not isinstance(dt_time, pd.Timestamp) \
and not isinstance(dt_time, pd.DatetimeIndex) \
and not isinstance(dt_time, date):
return np.nan
return 10000 * dt_time.year + 100 * dt_time.month + dt_time.day
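# e.g. datetime_to_integer(date(2020, 3, 14)) -> 20200314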
def integer_to_datetime(dt_time):
dt_time = str(dt_time)
try:
date = | pd.to_datetime(dt_time) | pandas.to_datetime |
import pandas as pd
import os
from tqdm import tqdm
import json
def news():
paths = os.listdir('文本分类数据集/ClassFile')
label = []
text = []
for path in tqdm(paths):
if path == '.DS_Store':
continue
p = '文本分类数据集/ClassFile/' + path
file_path = os.listdir(p)
for file in file_path:
with open(p + '/' + file, encoding='GBK', errors='ignore') as f:
content = f.read()
content = content.strip()
content = content.split('。')[0] + '。'
content = content.replace('\n', '')
content = content.replace('\t', '')
content = content.replace(' ', '')
content = content.replace(' ', '')
while content[0] == ';':
content = content[1:]
content = content.strip()
if 10 < len(content) < 128:
label.append(path)
text.append(content)
df = pd.DataFrame({'label': label, 'text': text})
df.to_csv('data/news_10.csv', index=False, encoding='utf_8_sig')
def sentiment():
text = []
label = []
for path in os.listdir('文本分类数据集/corpus/neg'):
with open('文本分类数据集/corpus/neg/' + path, encoding='GBK', errors='ignore') as file:
content = file.read()
content = content.replace('\n', '')
content = content.replace('\t', '')
content = content.replace(' ', '')
if content:
text.append(content)
label.append(0)
for path in os.listdir('文本分类数据集/corpus/pos'):
with open('文本分类数据集/corpus/pos/' + path, encoding='GBK', errors='ignore') as file:
content = file.read()
content = content.replace('\n', '')
content = content.replace('\t', '')
content = content.replace(' ', '')
if content:
text.append(content)
label.append(1)
df = pd.DataFrame({'label': label, 'text': text})
df.to_csv('data/sentiment_hotel.csv', index=False, encoding='utf_8_sig')
def sentiment2():
df1 = pd.read_csv('文本分类数据集/sentiment/train.tsv', sep='\t')
df2 = pd.read_csv('文本分类数据集/sentiment/dev.tsv', sep='\t')
df3 = | pd.read_csv('文本分类数据集/sentiment/test.tsv', sep='\t') | pandas.read_csv |
"""
the :mod:`fm` module includes a factorization machine algorithm.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
from collections import defaultdict
import time
import warnings
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import torch
from torch import nn
import torch.utils.data
from .predictions import PredictionImpossible
from .algo_base import AlgoBase
class CandidateDataset(torch.utils.data.Dataset):
def __init__(self, x, y, w):
self.len = x.shape[0]
self.x = x
self.y = y
self.w = w
def __getitem__(self, index):
return self.x[index], self.y[index], self.w[index]
def __len__(self):
return self.len
class MyCriterion(nn.Module):
""" The PyTorch model for the loss function. This class is used by
`FM`.
Args:
sample_weight: bool, default: False
Whether sample weights are used.
binary: bool, default: False
Whether the output is binary.
"""
def __init__(self, sample_weight=False, binary=False):
super().__init__()
self.sample_weight = sample_weight
self.binary = binary
if self.binary:
if self.sample_weight:
self.loss_fn = nn.BCELoss(reduction='none')
else:
self.loss_fn = nn.BCELoss()
else:
if self.sample_weight:
self.loss_fn = nn.MSELoss(reduction='none')
else:
self.loss_fn = nn.MSELoss()
def forward(self, y_pred, y, w):
loss = self.loss_fn(y_pred, y)
if self.sample_weight:
loss = torch.dot(w, loss) / torch.sum(w)
return loss
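# Hedged usage sketch for MyCriterion with synthetic tensors (not part of the library API):
def _example_my_criterion():
    criterion = MyCriterion(sample_weight=True, binary=False)
    y_pred = torch.tensor([0.2, 0.8, 0.5])
    y_true = torch.tensor([0.0, 1.0, 1.0])
    weights = torch.tensor([1.0, 2.0, 1.0])
    # Element-wise MSE, then a weighted average using the sample weights
    return criterion(y_pred, y_true, weights)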
class FMMixin():
def set_random_state(self):
if self.random_state is not None:
np.random.seed(self.random_state)
torch.manual_seed(self.random_state)
def _construct_fit_data(self):
""" Construct the data needed by the `fit()` function.
It is assumed that the user and item features are correctly encoded.
These dummies are created (if needed) using only the info in the
trainset.
"""
if self.user_lst and (self.trainset.n_user_features == 0):
raise ValueError('user_lst cannot be used since '
'there are no user_features')
if self.item_lst and (self.trainset.n_item_features == 0):
raise ValueError('item_lst cannot be used since '
'there are no item_features')
n_ratings = self.trainset.n_ratings
n_users = self.trainset.n_users
n_items = self.trainset.n_items
# Construct ratings_df from trainset
# The IDs are unique and start at 0
ratings_df = pd.DataFrame([tup for tup in self.trainset.all_ratings()],
columns=['userID', 'itemID', 'rating'])
# Initialize df with rating values
libsvm_df = | pd.DataFrame(ratings_df['rating']) | pandas.DataFrame |
import calendar
import pandas as pd
import mysql.connector
from etl_setup import *
import os
from etl_masterupdate import master_update
import time
start_t = time.time()
setup()
master_update()
# setting paths & file names
raw_data = 'C:\\Users\\work-dir\\sample data\\daily_sales_data\\'
work_path = 'C:\\Users\\misc-folder\\arch_test\\'
connect = "jun15_sep16.csv"
qlikview = "oct16_jul17.csv"
master = "id_region_master.xlsx"
host = 'localhost'
db = 'testdb'
# setting cwd
os.chdir(raw_data)
def dateparser(x): return pd.datetime.strptime(x, "%d-%m-%Y") # parsing dates and importing files
connect = pd.read_csv(raw_data+connect, date_parser=dateparser)
qlikview = pd.read_csv(raw_data+qlikview, date_parser=dateparser)
# importing files that would be moved to a sql db
# when moving to db take credential security into account
region_master = pd.read_excel(work_path+master)
# converting to datetime
connect['Date'] = pd.to_datetime(connect['Date'])
qlikview['Date'] = | pd.to_datetime(qlikview['Date']) | pandas.to_datetime |
import re
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers import (
CategoricalTransformer, LabelEncodingTransformer, OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test__get_intervals(self):
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert result[0] == expected_intervals
def test_fit(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer.fit(data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.fuzzy = False
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.25
@patch('scipy.stats.norm.rvs')
def test__get_value_fuzzy(self, rvs_mock):
# setup
rvs_mock.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test_reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = np.array(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer.fit(data)
result = transformer.reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
'bar': (0.5, 0.8333333333333333, 0.6666666666666666, 0.05555555555555555),
'tar': (0.8333333333333333, 0.9999999999999999, 0.9166666666666666,
0.027777777777777776)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows
Output:
- the output of `_transform_by_category`
Side effects:
- `_transform_by_category` will be called once
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer.transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer.reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = | pd.Series([1, 2, 3, 4]) | pandas.Series |
### Author <NAME> - 30 September 2020 ###
import pandas as pd
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import time
import argparse
# Currently use sys to get other script - in Future use package
import os
import sys
path_main = ("/".join(os.path.realpath(__file__).split("/")[:-2]))
sys.path.append(path_main + '/Classes/')
sys.path.append(path_main + '/Utils/')
from media_class import Medium, Supplement, GrowthMedium, Medium_one_hot, Supplement_one_hot, GrowthMedium_one_hot
from help_functions import mean, str_to_bool, str_none_check
def extract_data(meta_data, maf_data, level="Lowest"):
'''
Changing data for plotting
input:
meta_data = is a loaded pandas dataframe after media.py
maf_data = is a loaded pandas dataframe after maf.py
return:
heatmap_data = dataframe with gene occurrence
counts_symbols = counter of symbols
counts_disease = counter of disease
'''
meta_data["file"] = [str(x).split("/")[-1].split(".")[0] for x in meta_data["PANEL_oncotated_maf_mutect2"]]
if level == "Highest":
meta_data = meta_data[meta_data["Disease_highest_level"] != "Unknown"]
disease_types = list(set(meta_data["Disease_highest_level"]))
elif level == "Lowest":
meta_data = meta_data[meta_data["Disease_lowest_level"] != "Unknown"]
disease_types = list(set(meta_data["Disease_lowest_level"]))
else:
raise KeyError(f"Wierd disease level was passed: {level}. please provide either Highest or Lowest")
# Counter symbols
counts_symbols = Counter(maf_data["Hugo_Symbol"])
counts_symbols = dict(counts_symbols.most_common(100))
maf_data = maf_data[maf_data["Hugo_Symbol"].isin(list(counts_symbols.keys()))]
counts_symbols = pd.Series(counts_symbols, name='Count')
counts_symbols.index.name = 'Hugo_Symbol'
counts_symbols = counts_symbols.reset_index()
# Counter disease
counts_disease = Counter(meta_data["Disease_lowest_level"])
counts_disease = dict(counts_disease.most_common(len(counts_disease)))
counts_disease = pd.Series(counts_disease, name='Count')
counts_disease.index.name = 'Disease_lowest_level'
counts_disease = counts_disease.reset_index()
# Merge and transform into matrix
heatmap_data = pd.merge(maf_data, meta_data, on="file")
if level == "Highest":
heatmap_data = heatmap_data.groupby(["Disease_highest_level", "Hugo_Symbol"]).size().reset_index(name="Count")
heatmap_data = heatmap_data.pivot(index='Hugo_Symbol', columns='Disease_highest_level', values='Count')
heatmap_data.index = heatmap_data.index.str.strip()
heatmap_data = heatmap_data.reindex(counts_symbols["Hugo_Symbol"].tolist())
heatmap_data = heatmap_data.reindex(counts_disease["Disease_highest_level"].tolist(), axis=1)
heatmap_data = heatmap_data.fillna(0)
elif level == "Lowest":
heatmap_data = heatmap_data.groupby(["Disease_lowest_level", "Hugo_Symbol"]).size().reset_index(name="Count")
heatmap_data = heatmap_data.pivot(index='Hugo_Symbol', columns='Disease_lowest_level', values='Count')
heatmap_data.index = heatmap_data.index.str.strip()
heatmap_data = heatmap_data.reindex(counts_symbols["Hugo_Symbol"].tolist())
heatmap_data = heatmap_data.reindex(counts_disease["Disease_lowest_level"].tolist(), axis=1)
heatmap_data = heatmap_data.fillna(0)
else:
raise KeyError(f"Wierd disease level was passed: {level}. please provide either Highest or Lowest")
return heatmap_data, counts_symbols, counts_disease
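# Hedged usage sketch with synthetic toy frames; the column names follow extract_data above,
# while the sample/gene/disease values are made up for illustration.
def _example_extract_data():
    meta = pd.DataFrame({
        'PANEL_oncotated_maf_mutect2': ['/toy/sampleA.maf', '/toy/sampleB.maf'],
        'Disease_lowest_level': ['Melanoma', 'Glioma'],
        'Disease_highest_level': ['Skin', 'CNS'],
    })
    maf = pd.DataFrame({
        'Hugo_Symbol': ['TP53', 'BRAF', 'TP53'],
        'file': ['sampleA', 'sampleB', 'sampleB'],
    })
    heatmap, gene_counts, disease_counts = extract_data(meta, maf, level='Lowest')
    return heatmap  # genes x diseases count matrix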
def plotting_normal(plot_data, counts_symbols, counts_disease, save=False, show=True, extention=""):
'''
Heatmap plotting genes
input:
plot_data = input data from extract_data
counts_symbols = counter of symbols
counts_disease = counter of disease
save = False, or the directory path where the figure should be written (default is False)
show = if you want to show the plot (default is True)
extention = optional filename prefix used when saving the figure
'''
width_ratio = len(plot_data.columns)
# Get a combined legend using the max value
max_value = np.max(np.max(plot_data))
f, ax = plt.subplots(2, 2, figsize=(30, 45), gridspec_kw={'width_ratios': [1, 5], 'height_ratios': [1, 7.5]})
ax_genes = ax[1][0].plot(counts_symbols["Count"], counts_symbols["Hugo_Symbol"])
ax_disease = ax[0][1].plot(counts_disease["Disease_lowest_level"], counts_disease["Count"])
ax_main = sns.heatmap(plot_data, ax=ax[1][1], square=False, vmin=0, cbar=False, cbar_kws={"shrink": .60})
ax[0][1].set_xlim(xmin=0, xmax=len(counts_disease)-1)
ax[1][0].set_ylim(ymin=0, ymax=len(counts_symbols)-1)
ax[0][1].set_xticks([])
ax[1][0].set_yticks([])
ax[1][0].invert_yaxis()
ax[1][0].xaxis.tick_top()
ax[1][0].invert_xaxis()
ax[1][1].yaxis.set_label_text('')
ax[1][1].xaxis.set_label_text('')
ax[1][1].yaxis.tick_right()
ax[1][1].set_yticklabels(counts_symbols["Hugo_Symbol"], rotation=0, ha='left', fontsize=16)
ax[1][1].set_xticklabels(counts_disease["Disease_lowest_level"], rotation=90, ha='right', fontsize=16)
ax[0][1].set_yticklabels([0,0,100,200,300,400,500,600], rotation=0, fontsize=16)
ax[1][0].set_xticklabels([0,100,200,300,400], rotation=0, fontsize=16)
f.delaxes(ax[0][0])
f.tight_layout()
ax[1][1].set_xticklabels(counts_disease["Disease_lowest_level"], rotation=45, ha='right', fontsize=16)
if save != False:
if not save.endswith("Figures/"):
save += "Figures/"
f.savefig(save + extention + "heatmap_genes_matrix.png")
if show == True:
f.show()
if show == False and save == False:
warnings.warn('you are not checking the input data for media/supplements')
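# Example call (hypothetical output directory), matching the save/show flags documented above:
# plotting_normal(heatmap_data, counts_symbols, counts_disease, save='/tmp/', show=False, extention='demo_')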
def plotting_cluster(plot_data, save=False, show=True, extention=""):
'''
Heatmap plotting genes
input:
plot_data = input data from extract_data
save = False, or the directory path where the figure should be written (default is False)
show = if you want to show the plot (default is True)
extention = optional filename prefix used when saving the figure
'''
ax = sns.clustermap(plot_data, figsize=(30, 45))
plt.setp(ax.ax_heatmap.get_yticklabels(), rotation=0, ha='left', fontsize=16) # For y axis
plt.setp(ax.ax_heatmap.get_xticklabels(), rotation=45, ha='right', fontsize=16) # For x axis
ax.ax_heatmap.set_xlabel("")
ax.ax_heatmap.set_ylabel("")
if save != False:
if not save.endswith("Figures/"):
save += "Figures/"
plt.savefig(save + extention + "heatmap_genes_matrix.png")
if show == True:
plt.show()
if show == False and save == False:
warnings.warn('you are not checking the input data for media/supplements')
def heatmap_genes_matrix(meta_data, maf_data, Path_meta=False, Path_maf=False, Order="Nothing", Scale="Normalized", level="Lowest", save=False, show=True):
'''
Main script for plotting heatmap genes matrix
input:
data_meta = loaded file after media.py
data_maf = loaded file after maf.py
Path_meta = location of file after media.py
Path_maf = location of file after maf.py
Order = how to order the heatmap choices=["Clustered", "Nothing", "Both"]
Scale = how to scale the heatmap values=["log2", "Normalized", "log10"]
save = False, or the directory path where the figures should be written (default is False)
show = if you want to show the plot (default is True)
'''
# Read file if using command line
if Path_meta != False:
if not Path_meta.endswith(".pkl"):
Path_meta = Path_meta + "after_media.pkl"
meta_data = | pd.read_pickle(Path_meta) | pandas.read_pickle |
#!/usr/bin/env python
# coding: utf-8
# #Step by step process of analyzing data and finding the best predictor.
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../../../input/aljarah_xAPI-Edu-Data/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../../../input/aljarah_xAPI-Edu-Data"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# First let's import everything
# In[ ]:
import seaborn as sns
sns.set(style='white')
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
# Reading the data and dropping place of birth due to similarity. Also looking at mean and std of the data.
# In[ ]:
df = pd.read_csv("../../../input/aljarah_xAPI-Edu-Data/xAPI-Edu-Data.csv")
#print df.shape
df = df.drop('PlaceofBirth',1)
#print df.head(5)
print (df.describe())
# Lets create count plots for better visualization of data.
# In[ ]:
ls = ['gender','Relation','Topic','SectionID','GradeID','NationalITy','Class','StageID','Semester','ParentAnsweringSurvey','ParentschoolSatisfaction','StudentAbsenceDays']
for i in ls:
g = sns.factorplot(i,data=df,kind='count',size=5,aspect=1.5)
print (df.shape)
# We can observe a disproportionate difference in the peaks of attributes such as nationality and GradeID.
# **Now let's preprocess the data. First we do one-hot encoding to deal with categorical data. Then we split the data into train and test sets, separating features and target. Finally we apply standard scaling to the data.**
# In[ ]:
#preprocessing
target = df.pop('Class')
X = | pd.get_dummies(df) | pandas.get_dummies |
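# Hedged sketch of the remaining preprocessing steps described above (train/test split and
# standard scaling); variable names mirror the snippet but are illustrative:
# X_train, X_test, y_train, y_test = train_test_split(X, target, test_size=0.2, random_state=0)
# sc = StandardScaler()
# X_train = sc.fit_transform(X_train)
# X_test = sc.transform(X_test)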
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def technical_indicators(df):
"""
Technical indicator calculator function.
Returns a pandas DataFrame of various technical indicators such as RSI, SMA, EVM, EWMA,
BB and ROC computed over different time intervals.
Parameters:
df (DataFrame) : Pandas DataFrame of stock prices
Returns:
new_df (DataFrame) : Pandas DataFrame of technical indicators
"""
new_df = pd.DataFrame()
dm = ((df['Stock_High'] + df['Stock_Low'])/2) - ((df['Stock_High'].shift(1) + df['Stock_Low'].shift(1))/2)
br = (df['Stock_Volume'] / 100000000) / ((df['Stock_High'] - df['Stock_Low']))
EVM = dm / br
new_df['EVM_15'] = EVM.rolling(15).mean()
sma_60 = pd.Series.rolling(df['Stock_Close'], window=60, center=False).mean()
new_df["SMA_60"] = sma_60
sma_200 = pd.Series.rolling(df['Stock_Close'], window=200, center=False).mean()
new_df["SMA_200"] = sma_200
ewma_50 = df['Stock_Close'].ewm(span = 50, min_periods = 50 - 1).mean()
new_df["EWMA_50"] = ewma_50
ewma_200 = df['Stock_Close'].ewm(span = 200, min_periods = 200 - 1).mean()
new_df["EWMA_200"] = ewma_200
sma_5 = pd.Series.rolling(df['Stock_Close'], window=5, center=False).mean()
std_5 = pd.Series.rolling(df['Stock_Close'], window=5, center=False).std()
bb_5_upper = sma_5 + (2 * std_5)
bb_5_lower = sma_5 - (2 * std_5)
new_df["BB_5_UPPER"] = bb_5_upper
new_df["BB_5_LOWER"] = bb_5_lower
new_df["SMA_5"] = sma_5
sma_10 = pd.Series.rolling(df['Stock_Close'], window=10, center=False).mean()
std_10 = pd.Series.rolling(df['Stock_Close'], window=10, center=False).std()
bb_10_upper = sma_10 + (2 * std_10)
bb_10_lower = sma_10 - (2 * std_10)
new_df["BB_10_UPPER"] = bb_10_upper
new_df["BB_10_LOWER"] = bb_10_lower
new_df["SMA_10"] = sma_10
sma_20 = pd.Series.rolling(df['Stock_Close'], window=20, center=False).mean()
std_20 = pd.Series.rolling(df['Stock_Close'], window=20, center=False).std()
bb_20_upper = sma_20 + (2 * std_20)
bb_20_lower = sma_20 - (2 * std_20)
new_df["BB_20_UPPER"] = bb_20_upper
new_df["BB_20_LOWER"] = bb_20_lower
new_df["SMA_20"] = sma_20
roc_5 = df['Stock_Close'][5:]/df['Stock_Close'][:-5].values - 1
new_df["ROC_5"] = roc_5
roc_10 = df['Stock_Close'][10:]/df['Stock_Close'][:-10].values - 1
new_df["ROC_10"] = roc_10
roc_20 = df['Stock_Close'][20:]/df['Stock_Close'][:-20].values - 1
new_df["ROC_20"] = roc_20
delta = df['Stock_Close'].diff()
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
up_5 = pd.Series.rolling(up, window=5, center=False).mean()
down_5 = pd.Series.rolling(down.abs(), window=5, center=False).mean()
RS_5 = up_5 / down_5
RSI_5 = 100.0 - (100.0 / (1.0 + RS_5))
new_df["RSI_5"] = RSI_5
up_10 = | pd.Series.rolling(up, window=10, center=False) | pandas.Series.rolling |
### Author <NAME> - 30 September 2020 ###
import pandas as pd
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import time
import argparse
# Currently use sys to get other script - in Future use package
import os
import sys
path_main = ("/".join(os.path.realpath(__file__).split("/")[:-2]))
sys.path.append(path_main + '/Classes/')
sys.path.append(path_main + '/Utils/')
from media_class import Medium, Supplement, GrowthMedium, Medium_one_hot, Supplement_one_hot, GrowthMedium_one_hot
from help_functions import mean, str_to_bool, str_none_check
def extract_data(meta_data, maf_data, level="Lowest"):
'''
Reshape the data for plotting.
input:
meta_data = pandas DataFrame loaded after media.py
maf_data = pandas DataFrame loaded after maf.py
return:
heatmap_data = DataFrame of gene occurrence counts
counts_symbols = counter of gene symbols
counts_disease = counter of diseases
'''
meta_data["file"] = [str(x).split("/")[-1].split(".")[0] for x in meta_data["PANEL_oncotated_maf_mutect2"]]
if level == "Highest":
meta_data = meta_data[meta_data["Disease_highest_level"] != "Unknown"]
disease_types = list(set(meta_data["Disease_highest_level"]))
elif level == "Lowest":
meta_data = meta_data[meta_data["Disease_lowest_level"] != "Unknown"]
disease_types = list(set(meta_data["Disease_lowest_level"]))
else:
raise KeyError(f"Weird disease level was passed: {level}. Please provide either Highest or Lowest")
# Counter symbols
counts_symbols = Counter(maf_data["Hugo_Symbol"])
counts_symbols = dict(counts_symbols.most_common(100))
maf_data = maf_data[maf_data["Hugo_Symbol"].isin(list(counts_symbols.keys()))]
counts_symbols = pd.Series(counts_symbols, name='Count')
counts_symbols.index.name = 'Hugo_Symbol'
counts_symbols = counts_symbols.reset_index()
# Counter disease
counts_disease = Counter(meta_data["Disease_lowest_level"])
counts_disease = dict(counts_disease.most_common(len(counts_disease)))
counts_disease = pd.Series(counts_disease, name='Count')
counts_disease.index.name = 'Disease_lowest_level'
counts_disease = counts_disease.reset_index()
# Merge and transform into matrix
heatmap_data = | pd.merge(maf_data, meta_data, on="file") | pandas.merge |
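# The dataset row above truncates extract_data() at the merge; a hedged sketch of how
# the merged frame could be turned into the gene-occurrence matrix that the docstring
# promises. The crosstab call and the helper name are assumptions, not the original code.
import pandas as pd

def build_heatmap_matrix(merged):
    # Count how often each Hugo symbol occurs per disease (lowest level)
    return pd.crosstab(merged["Hugo_Symbol"], merged["Disease_lowest_level"])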
import os
import sys
import glob
import gc
import datetime
from tqdm import tqdm
import tkinter
from tkinter import filedialog
from tkinter import messagebox
import pydicom
import pandas as pd
def select_directory(initial_path):
'''Let the user select the directory containing the DICOM files via a tkinter GUI, and return the path to that directory'''
tk = tkinter.Tk()
tk.withdraw()
dicom_directory = filedialog.askdirectory(
initialdir=initial_path, title='Select the folder that contains the DICOM files')
return dicom_directory
def get_file_path(directory):
'''Take a directory and return the paths of all files inside it'''
if directory == '':
sys.exit(0)
path_of_files = glob.glob(directory + '/**/*', recursive=True)
return path_of_files
def get_dicom_path(path):
'''Take file paths and return only the DICOM files among them'''
dicom_files = []
for p in tqdm(path, desc='Loading data'):
try:
dicom_files.append(pydicom.dcmread(p))
except:
pass
return dicom_files
def separate_dicom_files(dicom_files):
'''Take DICOM files, split them into RDSR and PET files, and return them as a tuple'''
rdsr_files = []
pet_files = []
for f in tqdm(dicom_files, desc='Splitting data'):
try:
if f.SOPClassUID == '1.2.840.10008.5.1.4.1.1.88.67':
rdsr_files.append(f)
elif f.Modality == 'PT' and f.ImageType[0] == 'ORIGINAL':
pet_files.append(f)
except:
pass
return rdsr_files, pet_files
def separate_CT_Acquisition(rdsr_files):
'''Take RDSR file data and return the nested content at the level where the CT dose information is recorded'''
CTAcquisition = []
for r in rdsr_files[0x0040,0xa730].value:
try:
if r[0x0040,0xa043][0][0x0008,0x0100].value == '113819': # CTAcquisition_code
CTAcquisition.append(r[0x0040,0xa730])
except:
pass
return CTAcquisition
def extract_data_from_CT_Acquisition(rdsr_col, CTAcquisition):
'''Take a single CT Acquisition and return its data as a dictionary'''
# EV codes that delimit each data block
CTAcquisitionParameters_code = '113822'
CTXraySourceParameters_code = '113831'
CTDose_code = '113829'
DeviceRoleinProcedure_code = '113876'
DoseCheckNotificationDetails_code = '113908'
# Create the empty dictionary tmp_dictionary
tmp_dictionary = {col: [] for col in rdsr_col.keys()}
for _, nest1 in enumerate(CTAcquisition.value):
try:
if nest1[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['AcquisitionProtocol']:
tmp_dictionary['AcquisitionProtocol'] = nest1[0x0040, 0xa160].value
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['TargetRegion']:
tmp_dictionary['TargetRegion'] = nest1[0x0040,0xa168][0][0x0008, 0x0104].value
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['CTAcquisitionType']:
tmp_dictionary['CTAcquisitionType'] = nest1[0x0040,0xa168][0][0x0008, 0x0104].value
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['ProcedureContext']:
tmp_dictionary['ProcedureContext'] = nest1[0x0040,0xa168][0][0x0008, 0x0104].value
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == CTAcquisitionParameters_code:
try:
for _, nest2 in enumerate(nest1[0x0040,0xa730].value):
if nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['ExposureTime']:
tmp_dictionary['ExposureTime'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['ScanningLength']:
tmp_dictionary['ScanningLength'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['ExposedRange']:
tmp_dictionary['ExposedRange'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['NominalSingleCollimationWidth']:
tmp_dictionary['NominalSingleCollimationWidth'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['NominalTotalCollimationWidth']:
tmp_dictionary['NominalTotalCollimationWidth'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['PitchFactor']:
tmp_dictionary['PitchFactor'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == CTXraySourceParameters_code:
try:
for _, nest3 in enumerate(nest2[0x0040,0xa730].value):
if nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['IdentificationoftheXRaySource']:
tmp_dictionary['IdentificationoftheXRaySource'] = nest3[0x0040,0xa160].value
elif nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['KVP']:
tmp_dictionary['KVP'] = nest3[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['MaximumXRayTubeCurrent']:
tmp_dictionary['MaximumXRayTubeCurrent'] = nest3[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['MeanXRayTubeCurrent']:
tmp_dictionary['MeanXRayTubeCurrent'] = nest3[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['ExposureTimeperRotation']:
tmp_dictionary['ExposureTimeperRotation'] = nest3[0x0040,0xa300][0][0x0040,0xa30a].value
except:
pass
except:
pass
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == CTDose_code:
try:
for _, nest2 in enumerate(nest1[0x0040,0xa730].value):
if nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['MeanCTDIvol']:
tmp_dictionary['MeanCTDIvol'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040, 0xa043][0][0x0008, 0x0100].value == rdsr_col['CTDIwPhantomType']:
tmp_dictionary['CTDIwPhantomType'] = nest2[0x0040,0xa168][0][0x0008,0x0104].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['DLP']:
tmp_dictionary['DLP'] = nest2[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == DoseCheckNotificationDetails_code:
try:
for _, nest3 in enumerate(nest2[0x0040,0xa730].value):
if nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['DLPNotificationValue']:
tmp_dictionary['DLPNotificationValue'] = nest3[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['CTDIvolNotificationValue']:
tmp_dictionary['CTDIvolNotificationValue'] = nest3[0x0040,0xa300][0][0x0040,0xa30a].value
elif nest3[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['ReasonforProceeding']:
tmp_dictionary['ReasonforProceeding'] = nest3[0x0040,0xa160].value
except:
pass
except:
pass
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['Comment']:
tmp_dictionary['Comment'] = nest1[0x0040,0xa160].value
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['XRayModulationType']:
tmp_dictionary['XRayModulationType'] = nest1[0x0040,0xa160].value
elif nest1[0x0040,0xa043][0][0x0008,0x0100].value == DeviceRoleinProcedure_code:
try:
for _, nest2 in enumerate(nest1[0x0040,0xa730].value):
if nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['DeviceManufacturer']:
tmp_dictionary['DeviceManufacturer'] = nest2[0x0040,0xa160].value
elif nest2[0x0040,0xa043][0][0x0008,0x0100].value == rdsr_col['DeviceSerialNumber']:
tmp_dictionary['DeviceSerialNumber'] = nest2[0x0040,0xa160].value
except:
pass
except:
pass
# Set fields that contain no data to a blank
for col in rdsr_col.keys():
if not tmp_dictionary[col]:
tmp_dictionary[col] = ' '
return tmp_dictionary
def get_events_from_rdsr(rdsr_files):
'''Read the number of CT exposures (irradiation events) from the RDSR'''
# EV code for the total number of irradiation events
TotalNumberofIrradiationEvents_code = '113812'
for _, r in enumerate(rdsr_files[0x0040,0xa730].value):
try:
if r[0x0040,0xa730][0][0x0040,0xa043][0][0x0008,0x0100].value == TotalNumberofIrradiationEvents_code:
events = r[0x0040,0xa730][0][0x0040,0xa300][0][0x0040,0xa30a].value
except:
pass
return events
def extract_CT_Dose_Length_Product_Total(rdsr_files):
'''Extract the CT Dose Length Product Total from the RDSR and return it'''
# EV code for CT Dose Length Product Total
CTDoseLengthProductTotal_code = '113813'
for _, r in enumerate(rdsr_files[0x0040,0xa730].value):
try:
if r[0x0040,0xa730][1][0x0040,0xa043][0][0x0008,0x0100].value == CTDoseLengthProductTotal_code:
CDLPT = r[0x0040,0xa730][1][0x0040,0xa300][0][0x0040,0xa30a].value
except:
pass
return CDLPT
def extract_data_from_rdsr_header(rdsr_header_col_names, rdsr_files, events):
'''Extract information from the RDSR header and return it as a dictionary'''
# Create the empty dictionary tmp_header_dictionary
tmp_header_dictionary = {col: [] for col in rdsr_header_col_names}
for num, rdsr in enumerate(rdsr_files):
for eve in range(int(events[num])):
for name in rdsr_header_col_names:
try:
tmp_header_dictionary[name].append(
str(getattr(rdsr, name)))
except:
tmp_header_dictionary[name].append(" ")
return tmp_header_dictionary
def extract_information_from_PET(PET):
'''Extract the required information from the PET files and return it as a pd.DataFrame'''
pet_col_name = ['PatientID', 'StudyDate', 'RadionuclideTotalDose']
pet_df = pd.DataFrame(columns=pet_col_name)
for p in PET:
pet_tmp_data = []
for col in pet_col_name[0:2]:
pet_tmp_data.append(str(getattr(p, col)))
pet_tmp_data.append(
p.RadiopharmaceuticalInformationSequence[0].RadionuclideTotalDose)
tmp_se = | pd.Series(pet_tmp_data, index=pet_col_name) | pandas.Series |
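# A hedged sketch (an assumption, not the original code) of how the per-file Series
# built above can be stacked into the DataFrame that the docstring promises;
# pd.DataFrame over the collected rows is used instead of the deprecated DataFrame.append.
import pandas as pd

def rows_to_frame(series_rows, columns):
    """Stack a list of pd.Series (one per PET file) into a single DataFrame."""
    if not series_rows:
        return pd.DataFrame(columns=columns)
    frame = pd.DataFrame(series_rows)
    return frame.reindex(columns=columns).reset_index(drop=True)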
#Download and clean nest label series from Zooniverse
import pandas as pd
import geopandas as gpd
from panoptes_client import Panoptes
from shapely.geometry import box, Point
import json
import numpy as np
import os
from datetime import datetime
import utils
def species_from_label(value):
label_dict = {}
label_dict[0] = "Great Egret"
label_dict[1] = "Snowy Egret"
label_dict[2] = "White Ibis"
label_dict[3] = "Great Blue Heron"
label_dict[4] = "Wood Stork"
label_dict[5] = "Roseate Spoonbill"
label_dict[6] = "Anhinga"
label_dict[7] = "Other"
label_dict[8] = "Unknown"
return label_dict[value]
def download_data(everglades_watch, min_version, generate=False):
#see https://panoptes-python-client.readthedocs.io/en/v1.1/panoptes_client.html#module-panoptes_client.classification
classification_export = everglades_watch.get_export('classifications', generate=generate)
rows = []
for row in classification_export.csv_dictreader():
rows.append(row)
df = | pd.DataFrame(rows) | pandas.DataFrame |
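# A hedged sketch of what the unused min_version argument presumably does next: keep
# only classifications at or above the minimum workflow version. The 'workflow_version'
# column name follows the usual Zooniverse classification export, but treat it as an
# assumption here.
def filter_by_version(df, min_version):
    df = df.copy()
    df["workflow_version"] = df["workflow_version"].astype(float)
    return df[df["workflow_version"] >= float(min_version)]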
import pandas as pd
import os
global spot_df
spot_df = pd.read_pickle('data/tge_spot_preprocessed.p')
def diff_forecast(file_path, file_name):
forecast_df = pd.read_pickle(file_path+file_name+'.p')
if type(forecast_df) == type( | pd.DataFrame() | pandas.DataFrame |
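# A hedged sketch of what diff_forecast() appears to be building toward: aligning the
# forecast frame with the preprocessed TGE spot prices and returning their difference.
# The 'price' column name and the index join are assumptions, not the original code.
def diff_against_spot(forecast_df, spot_df, column="price"):
    aligned = forecast_df[[column]].join(spot_df[[column]], how="inner",
                                         lsuffix="_forecast", rsuffix="_spot")
    aligned["diff"] = aligned[f"{column}_forecast"] - aligned[f"{column}_spot"]
    return aligned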