prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"values, dtype",
[
([], "object"),
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
(["a", "b", "c"], "string"),
([1, 2, 3], "datetime64[ns]"),
([1, 2, 3], "datetime64[ns, CET]"),
([1, 2, 3], "timedelta64[ns]"),
(["2000", "2001", "2002"], "Period[D]"),
([1, 0, 3], "Sparse"),
([pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(3, 4)], "interval"),
],
)
@pytest.mark.parametrize(
"mask", [[True, False, False], [True, True, True], [False, False, False]]
)
@pytest.mark.parametrize("indexer_class", [list, pd.array, pd.Index, pd.Series])
@pytest.mark.parametrize("frame", [True, False])
def test_series_mask_boolean(values, dtype, mask, indexer_class, frame):
# In case len(values) < 3
index = ["a", "b", "c"][: len(values)]
mask = mask[: len(values)]
obj = pd.Series(values, dtype=dtype, index=index)
if frame:
if len(values) == 0:
# Otherwise obj is an empty DataFrame with shape (0, 1)
obj = | pd.DataFrame(dtype=dtype) | pandas.DataFrame |
'''
Plots all the results of the dwglasso analysis on a map of Canada.
NOTE: This file is intended to be executed by make from the top
level of the project directory hierarchy. We rely on os.getcwd()
and it will not work if run directly as a script from this directory.
'''
from dwglasso import cross_validate, dwglasso
import matplotlib as mpl; mpl.use('TkAgg')
from matplotlib import pyplot as plt
from itertools import combinations_with_replacement
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, LineString
from src.conf import CANADA_SHAPE, HDF_FINAL_FILE, LOCATIONS_KEY, MAX_P,\
P_LAG, ZZT_FILE_PREFIX, YZT_FILE_PREFIX, X_VALIDATE_FILE_PREFIX
def main():
p = P_LAG
assert p <= MAX_P and p >= 1, 'p must be in (1, MAX_P)!'
ZZT = np.load(ZZT_FILE_PREFIX + str(p) + '_T' + '.npy')
YZT = np.load(YZT_FILE_PREFIX + str(p) + '_T' + '.npy')
X_test = np.load(X_VALIDATE_FILE_PREFIX + '_dT' + '.npy')
# plt.imshow(ZZT)
# plt.colorbar()
# plt.title('Covariance Matrix ZZT')
# plt.show()
# plt.imshow(YZT)
# plt.colorbar()
# plt.title('Cross-Covariance Matrix YZT')
# plt.show()
# plt.plot(np.sort(np.linalg.eigvals(ZZT)))
# plt.title('ZZT Eigenvalues')
# plt.show()
# # Run dwglasso
# # B_hat = cross_validate(ZZT, YZT, X_test, p, tol=1e-9)
# N_edges = []
# n = YZT.shape[0]
# Lmbda = np.linspace(30, 150, 350)
# G_total = np.zeros((n, n))
# for lmbda in Lmbda:
# B_hat = dwglasso(ZZT, YZT, p, lmbda=lmbda, alpha=0.1,
# delta=0.1, sigma=15, silent=True, max_iter=250)
# print('lmbda =%9.3f' % lmbda, end='\r')
# G = np.abs(sum([B_hat[:, tau * n:(tau + 1) * n]
# for tau in range(p)]).T)
# G = G > 0 # The Granger-causality graph
# G = G - np.diag(np.diag(G)) # Zero the diagonal (no looping edges)
# G_total += G
# N_edges.append(np.sum(G))
# plt.plot(Lmbda, N_edges)
# plt.xscale('log')
# plt.title('N_edges vs lmbda (s = 15, alpha = 0.1, delta = 0.1, mu = 0.1')
# plt.xlabel('lmbda')
# plt.show()
# plt.imshow(G_total)
# plt.title('Edge count over lmbda')
# plt.colorbar()
# plt.show()
# return
# l, a, d, s = 0.00803, 0.80667, 0.77534, 99.76833 # on _dT
B_hat = dwglasso(ZZT, YZT, p, lmbda=1.0, alpha=0.1, delta=0.1,
sigma=15.0, max_iter=250, mu=0.01, tol=1e-9)
# plt.imshow(B_hat)
# plt.colorbar()
# plt.title('DWGLASSO Result')
# plt.show()
n = B_hat.shape[0]
assert B_hat.shape[1] // n == p, 'Issue with matrix sizes!'
# Get the actual causality matrix
G = np.abs(sum([B_hat[:, tau * n:(tau + 1) * n] for tau in range(p)]).T)
G = G > 0 # The Granger-causality graph
G = G ^ np.diag(np.diag(G)) # Zero the diagonal (no looping edges)
print('Num edges: ', np.sum(G), '/', ((n * p) * (n * p) + 1) // 2)
# plt.imshow(G)
# plt.title('Granger-causality Graph')
# plt.show()
# Plot the map of canada
canada = gpd.read_file(CANADA_SHAPE)
del canada['NOM'] # Drop French names
fig, ax = plt.subplots(1, 1)
ax.set_title('Canada')
ax.axes.get_yaxis().set_ticks([]) # Remove the ticks
ax.axes.get_xaxis().set_ticks([])
canada.plot(ax=ax, color='white', linewidth=0.5)
hdf_final = | pd.HDFStore(HDF_FINAL_FILE, mode='r') | pandas.HDFStore |
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.cpePaser import week_extract
from src.cpePaser import day_extract
from src.cpePaser import month_extract
from src.cpePaser import extract_data
import os
from src.compress import compress
import pandas as pd
from src.setting import setting
from src.timeOperator import timeOpt
import time
import multiprocessing
from src.logger_setting.my_logger import get_logger
logger = get_logger()
MONTH1 = '1'
MONTH2 = '2'
TRAIN = '0'
PREDICT = '1'
MAX_INVALID_VALUE = 9999
def get_data_by_range(first_day, last_day):
df = pd.DataFrame(columns=setting.month_column_name)
date_file_map = week_extract.get_date_map()
for k, v in date_file_map.items():
if timeOpt.is_in_time(k, first_day, last_day):
day_df = pd.read_csv(v, error_bad_lines=False, index_col=False)[setting.month_column_name]
day_df['date'] = k
df = df.append(day_df)
return df
def build_feature(df):
start_day = week_extract.get_min_month()
df_train = df[(df['date'] >= start_day) & (df['date'] < timeOpt.add_months(start_day, 2))]
df_pre = df[(df['date'] >= timeOpt.add_months(start_day, 1)) & (df['date'] < timeOpt.add_months(start_day, 3))]
df_for_churn = set(
df[(df['date'] >= timeOpt.add_months(start_day, 2)) & (df['date'] < timeOpt.add_months(start_day, 3))][
'esn'].astype('str').values)
compress.empty_folder(setting.model_path)
train_result_df = build(df_train, df_for_churn, TRAIN)
train_result_df.to_csv(os.path.join(setting.model_path, r"trainData.csv"), index=False)
pre_result_df = build(df_pre, df_for_churn, PREDICT)
pre_result_df[(pre_result_df['churnLabel'] < 1)].to_csv(
os.path.join(setting.model_path, r"predictData.csv"), index=False)
return 0
def build(data, not_churn_esn, build_type):
df_rsrp_sinr = get_all_data_df(data)
result_df = calc_week(data, build_type)
df_month1 = calc_month(data, MONTH1, build_type)
df_month2 = calc_month(data, MONTH2, build_type)
result_df = merge_data(result_df, df_month1)
result_df = merge_data(result_df, df_month2)
result_df = merge_data(result_df, df_rsrp_sinr)
result_df['churnLabel'] = result_df.esn.apply(lambda x: 0 if x in not_churn_esn else 1)
result_df = pd.DataFrame(result_df, columns=setting.parameter_json["xgboost_columns"])
result_df['dlTrafficMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['dlTrafficMonth1'], x['dlTrafficMonth2']), axis=1)
result_df['ulTrafficMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['ulTrafficMonth1'], x['ulTrafficMonth2']), axis=1)
result_df['dlTrafficPerdayMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['dlTrafficPerdayMonth1'], x['dlTrafficPerdayMonth2']), axis=1)
result_df['ulTrafficPerdayMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['ulTrafficPerdayMonth1'], x['ulTrafficPerdayMonth2']), axis=1)
result_df['ulTrafficPerdayMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['totalDlUlTrafficPerdayMonth1'], x['totalDlUlTrafficPerdayMonth2']), axis=1)
result_df['connectTimeMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['totalConnectTimeMonth1'], x['totalConnectTimeMonth2']), axis=1)
result_df['ulDlTrafficPerdayMonth2Compare1'] = result_df.apply(
lambda x: calc_compare(x['totalDlUlTrafficPerdayMonth1'], x['totalDlUlTrafficPerdayMonth2']), axis=1)
result_df['dlTrafficWeek9Compare8'] = result_df.apply(
lambda x: calc_compare(x['TotalDownloadWeek8'], x['TotalDownloadWeek9']), axis=1)
result_df['dlTrafficWeek8Compare7'] = result_df.apply(
lambda x: calc_compare(x['TotalDownloadWeek7'], x['TotalDownloadWeek8']), axis=1)
result_df['ulTrafficWeek9Compare8'] = result_df.apply(
lambda x: calc_compare(x['TotalUploadWeek8'], x['TotalUploadWeek9']), axis=1)
result_df['ulTrafficWeek8Compare7'] = result_df.apply(
lambda x: calc_compare(x['TotalUploadWeek7'], x['TotalUploadWeek8']), axis=1)
result_df['connectTimeWeek9Compare8'] = result_df.apply(
lambda x: calc_compare(x['TotalConnectTimeWeek8'], x['TotalConnectTimeWeek9']), axis=1)
result_df['connectTimeWeek8Compare7'] = result_df.apply(
lambda x: calc_compare(x['TotalConnectTimeWeek7'], x['TotalConnectTimeWeek8']), axis=1)
return result_df
def calc_compare(data1, data2):
if not data1 and not data2:
return MAX_INVALID_VALUE
elif not data1:
return MAX_INVALID_VALUE
elif not data2:
return -1
else:
return (data2 - data1) / data1
def get_all_data_df(df):
train_group_month = day_extract.groupby_calc(df).apply(calc_all_data).reset_index(drop=True)
return train_group_month
def calc_all_data(df):
esn = df['esn'].values
rsrp_sinr = month_extract.calc_rsrp_sinr(df)
ecgi = day_extract.get_main_cell(df['ECGI'].values)
ecgi_id = extract_data.PAT_NUM.findall(ecgi) if ecgi != setting.INVALID_STRING else ["-", "-"]
data = {'esn': esn[0],
'ENODEBID': ecgi_id[0],
'CELLID': ecgi_id[1],
'MinRSRP': rsrp_sinr['MinRSRP'],
'MaxRSRP': rsrp_sinr['MaxRSRP'],
'AvgRSRP': rsrp_sinr['AvgRSRP'],
'StdRSRP': rsrp_sinr['StdRSRP'],
'MinSINR': rsrp_sinr['MinSINR'],
'MaxSINR': rsrp_sinr['MaxSINR'],
'AvgSINR': rsrp_sinr['AvgSINR'],
'StdSINR': rsrp_sinr['StdSINR']}
result = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Harmonize the features between the target and the source data so that:
- same feature space is considered between the source and the target.
- features are odered in the same way, avoiding permutation issue.
"""
import numpy as np
import pandas as pd
def harmonize_feature_naming(target_data,
source_data,
target_gene_names,
source_gene_names,
remove_mytochondria=False,
gene_lookup_file=None):
#Find common genes
common_genes = np.intersect1d(target_gene_names, source_gene_names)
#Find common gene location
target_common_gene_index = np.where(np.isin(target_gene_names, common_genes))[0]
source_common_gene_index = np.where(np.isin(source_gene_names, common_genes))[0]
#Stack data
target_data = target_data[:,target_common_gene_index]
source_data = source_data[:,source_common_gene_index]
if remove_mytochondria and gene_lookup_file is not None:
#Load table
genes_lookup_table = | pd.read_csv(gene_lookup_file, delimiter=',') | pandas.read_csv |
import warnings
warnings.filterwarnings("ignore")
import os
import math
import numpy as np
import tensorflow as tf
import pandas as pd
import argparse
import json
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from scipy.stats import spearmanr, pearsonr
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
plt.rcParams["figure.figsize"] = (8,8)
import seaborn as sns
from umap import UMAP
import configparser as cp
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
###################################################
# Read in configuration file for network parameters
###################################################
config = cp.ConfigParser()
config.read('config.py')
rna_hidden_dim_1 = int(config.get('AE', 'RNA_Layer1'))
rna_hidden_dim_2 = int(config.get('AE', 'RNA_Layer2'))
rna_latent_dim = int(config.get('AE', 'RNA_Latent'))
translate_hidden_dim = int(config.get('AE', 'Translate_Layer1'))
l2_norm_dec = float(config.get('AE', 'L2_Norm_AE'))
l2_norm_kl = float(config.get('AE', 'L2_Norm_KL'))
learning_rate_kl = float(config.get('AE', 'Learning_Rate_KL'))
learning_rate_ae = float(config.get('AE', 'Learning_Rate_AE'))
Z_dim = int(config.get('CGAN', 'Z_dim'))
gen_dim1 = int(config.get('CGAN', 'Gen_Layer1'))
gen_dim2 = int(config.get('CGAN', 'Gen_Layer2'))
det_dim = int(config.get('CGAN', 'Det_Layer1'))
learning_rate_cgan = float(config.get('CGAN', 'Learning_Rate'))
l2_norm_cgan = float(config.get('CGAN', 'L2_Lambda'))
epoch_cgan = int(config.get('CGAN', 'Max_Epoch'))
###################################################
# Read in command line arguments
###################################################
parser = argparse.ArgumentParser(description='Perform Pseudocell Tracer Algorithm')
parser.add_argument('-d', '--data', help='Tab delimited file representing matrix of samples by genes', required=True)
parser.add_argument('-s', '--side_data', help= 'Tab delimited file for side information to be used', required=True)
parser.add_argument('-o', '--output', help='Output directory', required=True)
parser.add_argument('-p', '--plot_style', help= 'Plotting style to be used (UMAP or tSNE)', default="UMAP")
parser.add_argument('-n', '--num_cells_gen', help='Number of pseudocells to generate at each step', default=100, type=int)
parser.add_argument('-k', '--num_steps', help='Number of pseudocell states', default=100, type=int)
parser.add_argument('-a', '--start_states', help='Number of pseudocell states', nargs="+", required=True)
parser.add_argument('-b', '--end_states', help='Number of pseudocell states', nargs="+", required=True)
parser.add_argument('-g', '--genes_to_plot', help='Genes to plot in pseudocell trajectory', nargs="+")
args = parser.parse_args()
dset = args.data
sset = args.side_data
out = args.output
plot_method = args.plot_style
num_cells_gen = args.num_cells_gen
num_steps = args.num_steps
start_states = args.start_states
end_states = args.end_states
genes_to_plot = args.genes_to_plot
###################################################
# Function to plot
###################################################
def plot_data(rna, side):
if plot_method == "tSNE":
tsne = TSNE()
results = np.array(tsne.fit_transform(rna.values))
if plot_method == "UMAP":
umap = UMAP()
results = np.array(umap.fit_transform(rna.values))
plot_df = pd.DataFrame(data=results, columns=["X","Y"])
cl = np.argmax(side.values, axis=1)
plot_df["subtype"] = [side.columns[x] for x in cl]
return sns.scatterplot(x="X", y="Y", hue="subtype", data=plot_df)
###################################################
# Load Data
###################################################
rna_data = pd.read_csv(dset, header=0, index_col=0, sep="\t")
side_data = pd.read_csv(sset, index_col=0, header=0, sep="\t")
###################################################
# Filter common samples
###################################################
sample_list = np.intersect1d(rna_data.columns.values, side_data.columns.values)
gene_list = rna_data.index.values
side_data = side_data.filter(sample_list, axis=1)
rna_data = rna_data.filter(sample_list, axis=1)
rna_data = rna_data.transpose()
side_data = side_data.transpose()
print("Loaded Data...")
###################################################
# Create output directory
###################################################
print("Creating output directory...")
try:
os.mkdir("results")
except:
pass
out_dir = "results/" + out
try:
os.mkdir(out_dir)
print("Directory " , out_dir , " Created...")
except FileExistsError:
print("Warning: Directory " , out , " already exists...")
with open(out_dir + '/run_parameters.txt', 'w') as f:
json.dump(args.__dict__, f, indent=2)
with open(out_dir + '/run_network_config.txt', 'w') as f:
config.write(f)
###################################################
# Plot input data
###################################################
scatter = plot_data(rna_data, side_data)
plt.title("Input Data (" + str(plot_method) + ")")
plt.savefig(out_dir + "/input_scatter.png")
plt.clf()
###################################################
# Train supervised encoder
###################################################
print("Training supervised encoder...")
scaler = StandardScaler().fit(rna_data.values)
norm_rna_data = np.clip(scaler.transform(rna_data.values),-3,3)
reg_kl = tf.keras.regularizers.l2(l2_norm_kl)
kl_model = tf.keras.Sequential([
tf.keras.layers.Dense(rna_hidden_dim_1, activation='relu',
kernel_regularizer=reg_kl, input_shape=(len(gene_list),)),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(rna_hidden_dim_2, activation='relu',
kernel_regularizer=reg_kl),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(rna_latent_dim, activation='sigmoid', name='latent_layer',
kernel_regularizer=reg_kl),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(translate_hidden_dim, activation='relu',
kernel_regularizer=reg_kl),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(side_data.values.shape[1], activation=tf.nn.softmax,
kernel_regularizer=reg_kl, name='relative_prediction')
])
es_cb_kl = tf.keras.callbacks.EarlyStopping('val_kullback_leibler_divergence', patience=100, restore_best_weights=True)
kl_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate_kl), loss=tf.keras.losses.KLD, metrics=['kullback_leibler_divergence'])
kl_model.fit(norm_rna_data, side_data.values, epochs=10000, callbacks=[es_cb_kl], validation_split=0.1, verbose=0, batch_size=1024)
kl_model.fit(norm_rna_data, side_data.values, epochs=5, verbose=0, batch_size=1024)
kl_model.save(out_dir + "/encoder.h5")
###################################################
# Get latent data
###################################################
print("Getting latent data...")
latent_model = tf.keras.Model(inputs=kl_model.input, outputs=kl_model.get_layer('latent_layer').output)
latent = latent_model.predict(norm_rna_data)
latent_df = pd.DataFrame(data=latent, index=sample_list)
latent_df.to_csv("latent_values.tsv", sep="\t")
scatter = plot_data(latent_df, side_data)
plt.title("Latent Data (" + str(plot_method) + ")")
plt.savefig(out_dir + "/latent_scatter.png")
plt.clf()
###################################################
# Train decoder
###################################################
print("Training decoder...")
reg_dec = tf.keras.regularizers.l2(l2_norm_dec)
dec_model = tf.keras.Sequential([
tf.keras.layers.Dense(rna_hidden_dim_2, activation='relu', input_shape=(rna_latent_dim,),
kernel_regularizer=reg_dec, bias_regularizer=reg_dec),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(rna_hidden_dim_1, activation='relu',
kernel_regularizer=reg_dec, bias_regularizer=reg_dec),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(len(gene_list), activation=None, name='rna_reconstruction',
kernel_regularizer=reg_dec, bias_regularizer=reg_dec)
])
es_cb_dec = tf.keras.callbacks.EarlyStopping('val_mean_squared_error', patience=100, restore_best_weights=True)
dec_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate_ae), loss=tf.keras.losses.MSE, metrics=['mse'])
dec_model.fit(latent, norm_rna_data, epochs=100000, callbacks=[es_cb_dec], validation_split=0.1,
verbose=0, batch_size=1024)
dec_model.fit(latent, norm_rna_data, epochs=5, verbose=0, batch_size=1024)
dec_model.save(out_dir + "/decoder.h5")
###################################################
# Get reconstructed values
###################################################
decoded = dec_model.predict(latent)
decoded_df = pd.DataFrame(data=decoded, index=sample_list, columns=gene_list)
decoded_df.to_csv("reconstructed_values.tsv", sep="\t")
scatter = plot_data(decoded_df, side_data)
plt.title("Reconstructed Data (" + str(plot_method) + ")")
plt.savefig(out_dir + "/reconstructed_scatter.png")
plt.clf()
###################################################
# Build CGAN Architecture
###################################################
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
""" Discriminator Net model """
X = tf.placeholder(tf.float32, shape=[None, rna_latent_dim])
y = tf.placeholder(tf.float32, shape=[None, side_data.shape[1]])
D_W1 = tf.Variable(xavier_init([rna_latent_dim + side_data.shape[1], det_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[det_dim]))
D_W2 = tf.Variable(xavier_init([det_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
def discriminator(x, y):
inputs = tf.concat(axis=1, values=[x, y])
D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
D_logit = tf.matmul(D_h1, D_W2) + D_b2
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
""" Generator Net model """
Z = tf.placeholder(tf.float32, shape=[None, Z_dim])
G_W1 = tf.Variable(xavier_init([Z_dim + side_data.shape[1], gen_dim1]))
G_b1 = tf.Variable(tf.zeros(shape=[gen_dim1]))
G_W2 = tf.Variable(xavier_init([gen_dim1, gen_dim2]))
G_b2 = tf.Variable(tf.zeros(shape=[gen_dim2]))
G_W3 = tf.Variable(xavier_init([gen_dim2, rna_latent_dim]))
G_b3 = tf.Variable(tf.zeros(shape=[rna_latent_dim]))
theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]
def generator(z, y):
inputs = tf.concat(axis=1, values=[z, y])
G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
G_log_prob = tf.matmul(G_h2, G_W3) + G_b3
G_prob = tf.nn.sigmoid(G_log_prob)
return G_log_prob, G_prob
def sample_Z(m, n):
return np.random.uniform(-1., 1., size=[m, n])
###################################################
# Build CGAN Architecture
###################################################
print("Training CGAN...")
reg_dec = tf.keras.regularizers.l2(l2_norm_cgan)
G_sample, G_sample_sigmoid = generator(Z, y)
D_real, D_logit_real = discriminator(X, y)
D_fake, D_logit_fake = discriminator(G_sample, y)
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
D_solver = tf.train.AdamOptimizer(learning_rate=learning_rate_cgan).minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer(learning_rate=learning_rate_cgan).minimize(G_loss, var_list=theta_G)
train_side = side_data.values
train_latent = latent
train = tf.data.Dataset.from_tensor_slices((train_latent, train_side))
train = train.batch(1024)
train = train.shuffle(1000000)
train_iterator = train.make_initializable_iterator()
train_next_element = train_iterator.get_next()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for it in range(epoch_cgan):
sess.run(train_iterator.initializer)
batches = 0
total_g_loss = 0
total_d_loss = 0
while True:
try:
X_mb, y_mb = sess.run(train_next_element)
noise = np.random.normal(loc=0,scale=0.02,size=y_mb.shape[0]*y_mb.shape[1])
y_mb = np.clip(y_mb + noise.reshape(y_mb.shape[0], y_mb.shape[1]),0,1)
sums = np.sum(y_mb,1)
y_mb = y_mb/sums.reshape(-1,1)
batches += 1
Z_sample = sample_Z(X_mb.shape[0], Z_dim)
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: Z_sample, y:y_mb})
_, G_loss_curr, samp = sess.run([G_solver, G_loss, G_sample], feed_dict={Z: Z_sample, y:y_mb})
except tf.errors.OutOfRangeError:
print(" %05d\tGen Loss: %.3f\tDisc Loss: %.3f" % (it, G_loss_curr, D_loss_curr), end="\r")
break
z_sample = sample_Z(side_data.values.shape[0], Z_dim)
gen_data = sess.run(G_sample, feed_dict={Z: z_sample, y:side_data.values})
gen_latent_df = pd.DataFrame(data=gen_data, index=sample_list)
gen_latent_df.to_csv("observed_generated_latent_values.tsv", sep="\t")
scatter = plot_data(gen_latent_df, side_data)
plt.title("Generated Latent Data (" + str(plot_method) + ")")
plt.savefig(out_dir + "/generated_latent_scatter.png")
plt.clf()
gen_decoded = dec_model.predict(gen_data)
gen_decoded_df = | pd.DataFrame(data=gen_decoded, index=sample_list, columns=gene_list) | pandas.DataFrame |
# Package imports
import pandas as pd
import requests
import datetime
from unidecode import unidecode as UnicodeFormatter
import os
import bcolors
# Local imports
import path_configuration
import url_configuration
import progress_calculator
class Season_Info(object):
Url = None
Path = None
Requests = None
def __init__(self):
self.Url = url_configuration.Url_builder()
self.Path = path_configuration.Path()
self.Requests = requests
def import_seasons(self, year_to_find=None):
print(bcolors.PASS + 'STARTING EXTRACTOR, GETTING SEASONS...' + bcolors.END)
url_list = self.Url.url_season(year_to_find)
Progress = progress_calculator.ProgressBar(url_list)
for url in url_list:
Progress.get_progress_bar()
Season = 0
Round = []
GrandPrix = []
CircuitID = []
CircuitName = []
City = []
Country = []
Date = []
Latitude = []
Longitude = []
page = self.Requests.get(url)
json = page.json()
j_temp = json['MRData']
j_temp = j_temp['RaceTable']
circuits_list = j_temp['Races']
for circuit in circuits_list:
Season = circuit['season']
Round.append(circuit['round'])
GrandPrix.append(UnicodeFormatter(circuit['raceName'].replace(' ', '_')))
CircuitID.append(UnicodeFormatter(circuit['Circuit']['circuitId']))
CircuitName.append(UnicodeFormatter(circuit['Circuit']['circuitName']))
City.append(UnicodeFormatter(circuit['Circuit']['Location']['locality']))
Country.append(UnicodeFormatter(circuit['Circuit']['Location']['country']))
Latitude.append(circuit['Circuit']['Location']['lat'])
Longitude.append(circuit['Circuit']['Location']['long'])
Date.append(circuit['date'])
# CONSOLE PT
print(bcolors.WAITMSG + ' Getting Season Data:' + Season + bcolors.END)
Circuit_Data = {'Round': Round, 'Grand Prix': GrandPrix, 'Circuit ID': CircuitID,
'Circuit Name': CircuitName, 'City': City, 'Country': Country, 'Latitude': Latitude,
'Longitude': Longitude, 'Date': Date}
Circuit_DF = | pd.DataFrame(data=Circuit_Data) | pandas.DataFrame |
import tweepy
import pandas as pd
from langdetect import detect
from .sentiment import analyse_per_language
from .datahandler import DataHandler
import gc
class GlobalStreamListener(tweepy.StreamListener):
"""
Twitter listener. collects tweets and stores it to a data-handler
"""
def __init__(self, lan: str,
handler: DataHandler,
update_data_size: int,
max_size: int = 100000,
stream_all: bool = False):
"""
:param lan: the language of the tweets to be collected
:param handler: a data-handler to store the data
:param update_data_size: after how many data to dump on the data-handler
:param max_size: when achieves it, it must empty all lists
:param stream_all: whether to store all tweets or only those with geo-location
"""
super(GlobalStreamListener, self).__init__()
self.lan = lan
# Place type, country, country code και full_name
self.texts = []
self.sentiments = []
self.locations = []
self.created_at = []
self.retweet = []
self.users = []
self.tweet_ids = []
self.place_types = []
self.country = []
self.country_code = []
self.full_name = []
self.handler = handler
self.update_data_size = update_data_size
self.max_size = max_size
self.stream_all = stream_all
def on_status(self, status):
sts = status._json
try:
txt = status.extended_tweet["full_text"]
except AttributeError:
txt = sts['text']
user = sts['user']['id']
user_location = sts["user"]["location"]
created_at = sts['created_at']
tweet_id = str(sts['id'])
is_retweet = txt.lower().startswith("rt @")
if user_location is not None or self.stream_all:
try:
lang = detect(txt)
if lang == self.lan and txt not in self.texts:
self.locations.append(user_location)
self.sentiments.append(analyse_per_language(txt, self.lan)["compound"])
self.created_at.append(created_at)
self.texts.append(txt)
self.retweet.append(is_retweet)
self.users.append(user)
self.tweet_ids.append(tweet_id)
if sts['place']:
self.country.append(sts['place'].get('country'))
self.country_code.append(sts['place'].get('country_code'))
self.place_types.append(sts['place'].get('place_type'))
self.full_name.append(sts['place'].get('full_name'))
else:
self.country.append(None)
self.country_code.append(None)
self.place_types.append(None)
self.full_name.append(None)
if self.get_size_of_data() % self.update_data_size == 0:
self.dump_data()
except:
print(f"Could not detect the language for: {txt}")
#todo: add to logger
def get_size_of_data(self):
return len(self.texts)
def get_last_results(self, num_of_results=10):
return {'sentiment': self.sentiments[-num_of_results:],
'tweet_id': self.tweet_ids[-num_of_results:],
'text': self.texts[-num_of_results:],
'user_location': self.locations[-num_of_results:],
'created_at': self.created_at[-num_of_results:],
'is_retweet': self.retweet[-num_of_results:],
'user': self.users[-num_of_results:],
'place_type': self.place_types[-num_of_results:],
'country': self.country[-num_of_results:],
'country_code': self.country_code[-num_of_results:],
'full_name': self.full_name[-num_of_results:]}
def dump_data(self):
buffered_data = self.get_last_results(num_of_results=self.update_data_size)
df = | pd.DataFrame.from_dict(buffered_data) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Simulate elections.
Elements of an election
1. Create voter preferences
- Create voter preference distributions
- Create voter preference tolerance distribution
2. Create candidate preferences
3. Simulate voter behavior, strategy
4. Transform voter preferences into candidate scores or rankings
5. Input scores/ranks into election system.
6. Run the election.
7. Measure the results.
Object Data Transfer Model
--------------------------
Voters --> VoterGroup
Voters/VoterGroup --> Candidates
Voters, VoterGroup, Candidates --> Election
To construct models or benchmarks, start by creating object `Voters`.
`Voters` may have various properties such as preference,
voter strategy parameters, tolerance circles, etc. Define these
properties in Voters. Voters can be segregated by groups,
and each group may have different properties. `VoterGroup` is used to
define groups of several `Voters`.
After defining voters, candidates may be defined using class
`Candidate`. `Candidate` definition may be dependent on the voters population,
therefore `Candidate` accepts voters as an argument.
With the voters and candidates define, an election can be generated with
`Election`. `Election` has many subclasses which run the election.
- `BallotGenerator` takes voter and candidate information to generate honest
and tactical ballots.
- `eRunner` handles the running of specific types of elections.
- `ElectionResult` handles the storage of output data.
"""
import pickle
import copy
from typing import List
from collections import namedtuple
import numpy as np
import pandas as pd
import scipy
from scipy.stats import truncnorm
from votesim import metrics
from votesim import ballot
from votesim import votemethods
from votesim import utilities
from votesim.models import vcalcs
from votesim.strategy import TacticalBallots, FrontRunners
__all__ = [
'Voters',
'VoterGroup',
'Candidates',
'Election'
]
# Base random seeds
VOTERS_BASE_SEED = 2
CLIMIT_BASE_SEED = 3
CANDIDATES_BASE_SEED = 4
ELECTION_BASE_SEED = 5
#import seaborn as sns
import logging
logger = logging.getLogger(__name__)
def ltruncnorm(loc, scale, size, random_state=None):
"""
Truncated normal random numbers, cut off at locations less than 0.
Parameters
-----------
loc : float
Center coordinate of gaussian distribution
scale : float
Std deviation scale
size : int
Number of random numbers to generate
random_state : None or numpy.random.RandomState
Random number seeding object, or None.
Returns
---------
out : array shaped (size)
Output samples
"""
if scale == 0:
return np.ones(size) * loc
xmin = -loc / scale
t = truncnorm(xmin, 1e6)
s = t.rvs(size=size, random_state=random_state)
s = s * scale + loc
return s
def gaussian_preferences(coords, sizes, scales, rstate=None):
"""
Generate gaussian preference distributions at coordinate and specified size
Parameters
----------
coords : array shaped (a, b)
Centroids of a faction voter preferences.
- rows `a` = coordinate for each faction
- columns `b' = preference dimensions. The more columns, the more preference dimensions.
sizes : array shaped (a,)
Number of voters within each faction, with a total of `a` factions.
Use this array to specify how many people are in each faction.
scales : array shaped (a, b)
The preference spread, width, or scale of the faction. These spreads
may be multidimensional. Use columns to specify additional dimensions.
Returns
-------
out : array shaped (c, b)
Population preferences of `c` number of voters in `b` preference dimensions.
"""
if rstate is None:
rstate = np.random.RandomState
new = []
coords = np.atleast_2d(coords)
ndim = coords.shape[1]
for center, size, scale in zip(coords, sizes, scales):
logger.debug('size=%s', size)
pi = rstate.normal(loc=center,
scale=scale,
size=(size, ndim))
new.append(pi)
new = np.vstack(new)
return new
def _RandomState(seed, level=1):
"""
Create random state.
Generate multiple random statse from a single seed, by specifying
different levels for different parts of Election.
Parameters
----------
seed : int
Integer seed
level : int
Anoter integer seed.
"""
if seed is None:
return np.random.RandomState()
else:
return np.random.RandomState((seed, level))
_VoterData = namedtuple(typename='VoterData',
field_names=['action_record',
'strategy',
'pref',
'electionStats',
'weights',
'order'],
defaults=[None]*4
)
_CandidateData = namedtuple(typename='CandidateData',
field_names=['pref'],
defaults=[None])
class CandidateData(_CandidateData):
pass
class VoterData(_VoterData):
pass
class Voters(object):
"""Create simple normal distribution of voters.
Parameters
----------
seed : int or None
Integer seed for pseudo-random generation. None for random numbers.
strategy : dict
Voter regret-to-ratings conversion strategy.
stol : float (default 1.0)
Tolerance factor for strategy
Features
--------
Score & ratings are constructed based on candidate coordinates
Attributes
----------
pref : array shape (a, b)
Voter preferences, `a` number of voters, `b` number of preference dimensions
strategy : dict
Container for strategy options with keys
tol : float
Voter preference tolerance
base : str
Base honest ballot type
tactics : list of str
Tactic methods to apply onto ballot.
See `votesim.ballot.TacticalBallots` for available tactics.
onesided : bool
Use onesided ballot, or use full strategic ballot.
iteration : int
Numbers of iterations of strategy to undergo.
"""
def __init__(self, seed=None, strategy: dict=None, order=1):
self.init(seed, order=order)
if strategy is None:
strategy = {}
self.set_strategy(**strategy)
return
@utilities.recorder.record_actions(replace=True)
def init(self, seed, order: int):
"""Set pseudorandom seed & distance calculation order."""
self.seed = seed
self._randomstate = _RandomState(seed, VOTERS_BASE_SEED)
self.order = order
#self._randomstate2 = _RandomState(seed, CLIMIT_BASE_SEED)
return
@utilities.recorder.record_actions(replace=True)
def set_strategy(self,
tol=None,
base='linear',
iterations=1,
tactics: List[str]=(),
subset='',
ratio=1.0,
frontrunnertype='tally',
frontrunnernum=2,
frontrunnertol=0.0,
):
"""Set voter strategy type."""
self.strategy = {}
self.strategy['tol'] = tol
self.strategy['base'] = base
self.strategy['tactics'] = tactics
self.strategy['subset'] = subset
self.strategy['ratio'] = ratio
self.strategy['frontrunnertype'] = frontrunnertype
self.strategy['frontrunnernum'] = frontrunnernum
self.strategy['frontrunnertol'] = frontrunnertol
if len(tactics) == 0:
iterations = 0
self.strategy['iterations'] = iterations
@utilities.recorder.record_actions()
def add_random(self, numvoters, ndim=1, loc=None):
"""Add random normal distribution of voters.
Parameters
----------
numvoters : int
Number of voters to generate
ndim : int
Number of preference dimensions of population
loc : array shaped (ndim,)
Coordinate of voter centroid
"""
rs = self._randomstate
center = np.zeros(ndim)
voters = rs.normal(center, size=(numvoters, ndim))
if loc is not None:
voters = voters + loc
self._add_voters(voters)
return
@utilities.recorder.record_actions()
def add_points(self, avgnum, pnum, ndim=1):
"""Add a random point with several clone voters at that point.
Parameters
----------
avgnum : int
Avg. Number of voters per unique point
pnum : int
Number of unique points
ndim : int
Number of dimensions
"""
rs = self._randomstate
center = np.zeros(ndim)
for i in range(pnum):
# coordinate of point
point = rs.normal(center, size=(1, ndim))
# number of voters at the point
voternum = ltruncnorm(1, 1, 1) * avgnum
voternum = int(voternum)
voters = np.ones((voternum, ndim)) * point
self._add_voters(voters)
return
@utilities.recorder.record_actions()
def add(self, pref):
"""Add arbitrary voters.
Parameters
----------
pref : array shape (a, b)
Voter preferences, `a` is number of voters, `b` pref. dimensions.
"""
self._add_voters(pref)
pass
def _add_voters(self, pref):
"""Base function for adding 2d array of candidates to election."""
try:
pref = np.row_stack((self.pref, pref))
except AttributeError:
pref = np.atleast_2d(pref)
self.pref = pref
self._ElectionStats = metrics.ElectionStats(voters=self)
return
def calculate_distances(self, candidates):
"""Preference distances of candidates from voters.
Parameters
----------
candidates : array shaped (a, b)
Candidate preference data
"""
pref = self.pref
try:
weights = self.weights
except AttributeError:
weights = None
distances = vcalcs.voter_distances(voters=pref,
candidates=candidates,
weights=weights,
order=self.order)
return distances
def honest_ballots(self, candidates):
"""Honest ballots calculated from Candidates."""
distances = self.calculate_distances(candidates.pref)
b = ballot.gen_honest_ballots(distances=distances,
tol=self.strategy['tol'],
base=self.strategy['base'])
return b
@property
def electionStats(self) -> metrics.ElectionStats:
return self._ElectionStats
def reset(self):
"""Reset method records. Delete voter preferences and records."""
try:
self._method_records.reset()
except AttributeError:
pass
try:
del self.pref
except AttributeError:
pass
return
def copy(self):
return copy.deepcopy(self)
# def split(self, ratios):
# """Split Voter into multiple voter groups"""
# if hasattr(self, 'weights'):
# raise NotImplementedError('Split function not implemented for self.weight')
# vsum = np.sum(ratios, dtype=float)
# voter_num = len(self.pref)
# i1 = 0
# for ratio in ratios[0 : -1]:
# fraction = ratio / vsum
# num = int(np.round(fraction * voter_num))
# i2 = i1 + num
# pref_ii = self.pref[i1 : i2]
# i1 = i2
# vnew = self.copy()
# vnew.pref = pref_ii
class VoterGroup(object):
"""Group together multiple voter objects & interact with candidates.
Parameters
----------
voters_list : list[Voters]
List of Voters
Attributes
----------
group : list[Voters]
Same as voters_list
"""
def __init__(self, voters_list: List[Voters]):
try:
iter(voters_list)
except Exception:
voters_list = [voters_list]
self.group = voters_list
orders = np.array([v.order for v in self.group])
if not np.all(orders == orders[0]):
raise ValueError('Order of voters in group must all be same.')
self.order = orders[0]
return
@utilities.lazy_property
def pref(self):
vlist = [v.pref for v in self.group]
return np.vstack(vlist)
@utilities.lazy_property
def electionStats(self):
return metrics.ElectionStats(voters=self)
def reset(self):
for voter in self.group:
voter.reset()
def __getitem__(self, key):
return self.group[key]
def voter_group(vlist):
"""Group together multiple Voters."""
if hasattr(vlist, 'group'):
return vlist
else:
return VoterGroup(vlist)
class Candidates(object):
"""
Create candidates for spatial model
Parameters
-----------
voters : `Voters` or `VoterGroup`
Voters to draw population data.
Attributes
----------
pref : array shape (a, b)
Voter preferences, `a` number of candidates,
`b` number of preference dimensions
"""
def __init__(self, voters: Voters, seed=None):
self._method_records = utilities.recorder.RecordActionCache()
if not hasattr(voters, '__len__'):
voters = [voters]
self.voters = voter_group(voters)
self.set_seed(seed)
return
@utilities.recorder.record_actions()
def set_seed(self, seed):
""" Set pseudorandom seed """
self._seed = (seed, CANDIDATES_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return
def _add_candidates(self, candidates):
"""Base function for adding 2d array of candidates to election"""
candidates = np.array(candidates)
assert candidates.ndim == 2, 'candidates array must have ndim=2'
try:
candidates = np.row_stack((self.candidates, candidates))
except AttributeError:
candidates = np.atleast_2d(candidates)
cdim = candidates.shape[1]
vdim = self.voters.pref.shape[1]
condition = cdim == vdim
s = ('dim[1] of candidates (%s) '
'must be same as dim[1] (%s) of self.voters' % (cdim, vdim))
assert condition, s
self.pref = candidates
return
def reset(self):
"""Reset candidates for a given Voters.
Delete candidate preferences and records"""
try:
self._method_records.reset()
except AttributeError:
pass
try:
del self.pref
except AttributeError:
pass
return
@utilities.recorder.record_actions()
def add_random(self, cnum, sdev=2):
"""
Add random candidates, uniformly distributed.
Parameters
----------
cnum : int
Number of candidates for election
sdev : float
+- Width of standard deviations to set uniform candidate
generation across voter population
"""
rs = self._randomstate
std = self.voters.electionStats.voter.pref_std
mean = self.voters.electionStats.voter.pref_mean
ndim = std.shape[0]
candidates = rs.uniform(low = -sdev*std,
high = sdev*std,
size = (cnum, ndim)) + mean
self._add_candidates(candidates)
return
@utilities.recorder.record_actions()
def add(self, candidates):
"""Add 2d array of candidates to election, record actions
Parameters
----------------
candidates : array shape (a, n)
Candidate preference coordinates.
- a = number of candidates
- n = number of preference dimensions
"""
self._add_candidates(candidates)
return
@utilities.recorder.record_actions()
def add_median(self,):
"""Add candidate located at voter median coordinate"""
median = self._stats['voter.median']
self._add_candidates(median)
@utilities.recorder.record_actions()
def add_faction(self, vindex):
"""
Add a candidate lying on the centroid of a faction generated using
Voters.add_faction.
Parameters
----------
vindex : int
Index of faction, found in self.voter_ags['coords']
"""
coords = self.voters.fcoords[vindex]
self._add_candidates(coords)
return
# def get_ballots(self, etype):
# return self.voters.tactical_ballots(etype)
class BallotGenerator(object):
"""
Generate ballots from voter and candidate data.
Parameters
----------
voters_list : list of Voter or VoterGroup
Voters of election
candidates : Candidates
Candidates of election
"""
def __init__(self, voters_list: VoterGroup, candidates: Candidates):
self.candidates = candidates
self.group = voter_group(voters_list).group
@utilities.lazy_property
def honest_ballots(self) -> ballot.CombineBallots:
"""Combined honest ballots for all voters in all groups."""
logger.info('Constructing honest ballots.')
blist = [v.honest_ballots(self.candidates) for v in self.group]
new = ballot.CombineBallots(blist)
return new
def ballots(self,
etype: str,
ballots=None,
result: "ElectionResult"=None) -> TacticalBallots:
"""Generate ballots according specified voter strategy.
One-sided index information for `self.index_dict` is also constructed
when tactical ballots are constructed.
Parameters
----------
etype : str
Election type
ballots : ballot subclass
Optional, Initial ballots
erunner : eRunner class
Optional, Previous election runnner if available.
Returns
-------
out : TacticalBallots
Ballots used for election
"""
#indices = self.honest_ballots.children_indices
#maxiter = max(v.strategy['iterations'] for v in self.group)
if ballots is None:
b0 = self.honest_ballots
else:
b0 = ballots
if self.is_all_honest_voters():
return b0
logger.info('Constructing tactical ballots')
# Retrieve initial front runners
# frontrunners_init = b
# erunner = b0.erunner
self.clean_index()
b = TacticalBallots(etype, ballots=b0, result=result)
indices = self.index_dict_tactical
# Set tactics for each group
# for jj, vindex in enumerate(indices):
for jj, (key, vindex) in enumerate(indices.items()):
voters = self.group[jj]
strategy = voters.strategy
# iterations = strategy['iterations']
# if ii < iterations:
b.set(tactics=strategy['tactics'],
subset=strategy['subset'],
frontrunnernum=strategy['frontrunnernum'],
frontrunnertype=strategy['frontrunnertype'],
frontrunnertol=strategy['frontrunnertol'],
index=vindex
)
# Record group index locations for one-sided tactics
# if ii == iterations - 1:
# if strategy['onesided'] == True:
name = str(jj) + '-tactical-underdog'
self.index_dict[name] = np.where(b.iloc_bool_underdog)[0]
name = str(jj) + '-tactical-topdog'
self.index_dict[name] = np.where(b.iloc_bool_topdog)[0]
# To perform next iteration, set the base ballot to the newly
# constructed tactical ballots
# b0 = b
return b
def is_all_honest_voters(self):
"""bool : Determine if all voter groups are honest."""
for voter in self.group:
if len(voter.strategy['tactics']) > 0:
return False
return True
@utilities.lazy_property
def index_dict(self):
"""dict : Index locations of voters for each group.
If one-sided tactical ballots are generated, index locations for
'-topdog' and '-underdog' voters are also included."""
d = self.index_dict_groups.copy()
for key, value in self.index_dict_tactical.items():
d[key + '-tactical'] = value
for key, value in self.index_dict_honest.items():
d[key + '-honest'] = value
return d
@utilities.lazy_property
def index_dict_groups(self):
"""dict : Index locations of voters for each group.
If one-sided tactical ballots are generated, index locations for
'-topdog' and '-underdog' voters are also included."""
indices = self.honest_ballots.children_indices
index_dict = {}
for ii, index in enumerate(indices):
index_dict[str(ii)] = index
#self._index_dict = index_dict
return index_dict
@property
def index_dict_tactical(self):
return self._index_dict_tactical_honest[0]
@property
def index_dict_honest(self):
return self._index_dict_tactical_honest[0]
@utilities.lazy_property
def _index_dict_tactical_honest(self):
"""Calculate index locations of tactical voters and honest voters for
each group."""
dict_tactical= {}
dict_honest = {}
group_num = len(self.group)
for ii in range(group_num):
group = self.group[ii]
slicei = self.honest_ballots.children_indices[ii]
starti = slicei.start
stopi = slicei.stop
strategy = group.strategy
voter_num = len(group.pref)
try:
ratio = strategy['ratio']
except KeyError:
# Assume 100% strategic voters if ratio not found.
ratio = 1.0
if len(strategy['tactics']) > 0:
strat_voter_num = int(np.round(ratio * voter_num))
endi = starti + strat_voter_num
index_tactical = np.arange(starti, endi)
index_honest = np.arange(endi, stopi)
else:
index_tactical = np.array([], dtype=int)
index_honest = np.arange(starti, stopi)
dict_tactical[str(ii)] = index_tactical
dict_honest[str(ii)] = index_honest
return dict_tactical, dict_honest
def reset(self):
utilities.clean_lazy_properties(self)
def clean_index(self):
names = ['index_dict',
'_index_dict_tactical_honest',
'index_dict_groups']
utilities.clean_some_lazy_properties(self, names)
@property
def distances(self):
"""(a, b) array: `a` Voter preference distances from `b` candidates."""
return self.honest_ballots.distances
def __getitem__(self, key):
return self.group[key]
class Election(object):
"""
Run an Election with Voters and Candidates
Parameters
------------
voters : None, Voters, VoterGroup, or list of Voters
Voters object specifying the voter preferences and behavior.
candidate : None or Candidates
Candidates object specifying candidate preferences
seed : int or None
Seed for pseudo-random number generation
numwinners : int >= 1
Number of winners for the election
scoremax : int
Maximum score for ballot generation
name : str
Name of election model, used to identify different benchmark models.
save_args : bool (default True)
- If True, save all parameters input into method calls. These
parameters can be used to regenerate specific elections.
- If False, only save parameters input into `self.user_data`.
Attributes
----------
result : ElectionResult
Results storage for Election.
ballotgen : BallotGenerator
VoterBallot data
"""
def __init__(self,
voters: VoterGroup=None,
candidates: Candidates=None,
seed=None,
numwinners=1,
scoremax=5,
name = '',
save_args=True):
self._method_records = utilities.recorder.RecordActionCache()
#self._result_history = []
self.voters = None
self.candidates = None
self.save_args = save_args
self.init(seed, numwinners, scoremax, name)
self.set_models(voters, candidates)
self._result_calc = ElectionResultCalc(self)
return
@utilities.recorder.record_actions(replace=True)
def init(self, seed, numwinners, scoremax, name):
"""Initialize some election properties"""
self._set_seed(seed)
self.numwinners = numwinners
self.scoremax = scoremax
self.name = name
return
def set_models(self, voters=None, candidates=None):
"""Set new voter or candidate model.
Parameters
----------
voters : Voters or None
New voters object
candidates : Candidates or None
New candidates object
"""
if voters is not None:
self.voters = voter_group(voters)
self.electionStats = self.voters.electionStats
if candidates is not None:
self.candidates = candidates
self.electionStats.set_data(candidates=candidates)
# self.electionStats.set_data(candidates=self.candidates.pref,)
if voters is not None:
self.ballotgen = BallotGenerator(self.voters, self.candidates)
return
def _set_seed(self, seed):
""" Set pseudorandom seed """
if seed is None:
self._seed = None
self._randomstate = _RandomState(None)
else:
self._seed = (seed, ELECTION_BASE_SEED)
self._randomstate = _RandomState(*self._seed)
return
def user_data(self, d=None, **kwargs):
"""Record any additional data the user wishes to record.
Parameters
----------
**d : dict
Write any keys and associated data here
"""
udict = {}
udict.update(kwargs)
if d is not None:
# d is supposed to be a dictionary. Try to update our dict with it
try:
udict.update(d)
# Maybe the user is trying to create a parameter `d`
except TypeError:
udict['d'] = d
self._user_data = udict
return
def reset(self):
"""Delete election data for the current run --
voter preferences, candidate preferences, and ballots,
Clear the kind of data that can be regenerated if desired.
Do not clear statistics.
"""
self.voters.reset()
self.candidates.reset()
def delete(a):
try:
delattr(self, a)
except AttributeError:
pass
delete('winners')
delete('ties')
delete('output')
delete('vballots')
raise NotImplementedError('This function probably doesnt work.')
return
@utilities.recorder.record_actions(replace=True,
exclude=['ballots', 'erunner'])
def run(self, etype=None, method=None,
btype=None, ballots=None, result=None):
"""Run the election using `votemethods.eRunner`.
Parameters
----------
etype : str
Election method. Either `etype` or `method` must be input.
method : func
Election method function
btype : str
Ballot type. If `method` used,
ballots : Ballots
Initial ballots to be used in election.
election : Election
Election, you can input honest election
using this object to reduce repetitive computation cost.
"""
logger.debug('Running %s, %s, %s', etype, method, btype)
ballots = self.ballotgen.ballots(etype=etype,
ballots=ballots,
result=result)
runner = ballots.run(etype=etype,
rstate=self._randomstate,
numwinners=self.numwinners)
self._result_calc.update(runner)
self.used_ballots = ballots
self.result = ElectionResult(self)
return self.result
def rerun(self, d):
"""Re-run an election found in dataframe. Find the election
data from the dataframe index
Parameters
----------
index : int or None
Election index from self._dataframe
d : dict or None
Dictionary or Series of election data, generated from self.dataseries.
Returns
-------
out : Election
Newly constructed election object with re-run parameters.
"""
series = d
def filterdict(d, kfilter):
new = {}
num = len(kfilter)
for k, v in d.items():
if k.startswith(kfilter):
newkey = k[num :]
new[newkey] = v
return new
filter_key = 'args.candidate.'
c_dict = filterdict(series, filter_key)
filter_key = 'args.election.'
e_dict = filterdict(series, filter_key)
vnum = len(self.voters.group)
new_voters = []
for ii in range(vnum):
filter_key = 'args.voter-%s.' % ii
v_dict = filterdict(series, filter_key)
v = type(self.voters.group[ii])()
#v = type(self.voters)()
v._method_records.reset()
v._method_records.run_dict(v_dict, v)
new_voters.append(v)
c = type(self.candidates)(voters=new_voters)
c._method_records.reset()
c._method_records.run_dict(c_dict, c)
enew = Election(voters=v, candidates=c)
enew._method_records.run_dict(e_dict, enew)
return enew
def copy(self) -> 'Election':
"""Copy election"""
return copy.copy(self)
def save(self, name, reset=True):
"""Pickle election data
Parameters
----------
name : str
Name of new pickle file to dump Election ito
reset : bool
If True (default), delete election data that can be regenerated.
"""
if reset:
self.reset()
with open(name, 'wb') as file1:
pickle.dump(self, file1)
return
def dataseries(self, index=None):
"""Retrieve pandas data series of output data."""
return self._result_calc.dataseries(index=index)
def dataframe(self):
"""Construct data frame from results history."""
return self._result_calc.dataframe()
def append_stat(self, d: metrics.BaseStats, name='', update_docs=False):
return self._result_calc.append_stat(d=d,
name=name,
update_docs=update_docs)
class ElectionResultCalc(object):
"""
Store Election result output. Generated as attribute of Election.
This is a sort of messy back-end that does all the calculations. The
result front end is `ElectionResult`.
Parameters
----------
e : Election
Election to extract results from.
Attributes
----------
runner : :class:`~votesim.votemethods.voterunner.eRunner`
Output from election running class for the last run election.
results : dict
Results of last run election key prefixes:
- 'output.*' -- Prefix for election output results
- 'args.etype' -- Election method
- 'args.voter.*' -- Voter input arguments
- 'args.election.*' -- Election input arguments
- 'args.user.*' -- User defined input arguments
Output Specification
--------------------
For each election output keys are generated as dataframes or dataseries.
- Voter parameters are specified as `args.voter-vnum.a.func.argname`
- `vnum` = Voter group number
- `a` = Method call number (a method could be called multiple times.)
- `func` = Name of the called method
- `argname` = Name of the set parameter for the method.
- Candidate parameters are specified as `args.candidate.a.func.arg`
- User parameters are specified as `args.user.name`
- `name` is the user's inputted parameter name
"""
def __init__(self, e: Election):
self.election = e
self.save_args = e.save_args
# Store results as list of dict
self._output_history = []
pass
def update(self, runner: votemethods.eRunner):
"""Get election results."""
self.runner = runner
self.winners = runner.winners
self.ties = runner.ties
self.ballots = runner.ballots
return self._get_results()
def _get_results(self):
"""Retrieve election statistics and post-process calculations."""
stats = self._electionStats
stats.set_data(election=self.election)
### Build dictionary of all arguments and results
results = {}
results.update(self._get_parameters())
results['output'] = stats.get_dict()
results = utilities.misc.flatten_dict(results, sep='.')
self.output = results
self._output_history.append(results)
return self
def _get_parameter_keys(self) -> list:
"""Retrieve election input parameter keys."""
return list(self._get_parameters().keys())
def _get_parameters(self) -> dict:
"""Retrieve election input parameters."""
params = {}
candidates = self.election.candidates
voters = self.election.voters
election = self.election
# get candidate parameters
crecord = candidates._method_records.dict
# get voter parameters
vrecords = []
for v in voters.group:
vrecords.append(v._method_records.dict)
# get election parameters
erecord = election._method_records.dict
# Retrieve user data
# Determine if user data exists. If not, save default save_args
save_args = self.save_args
try:
userdata = self.election._user_data
if len(userdata) == 0:
save_args = True
except AttributeError:
save_args = True
userdata = {}
# Add user data to params
for key, value in userdata.items():
newkey = 'args.user.' + key
params[newkey] = value
# Save etype and name in special parameters
for key in erecord:
if 'run.etype' in key:
params['args.etype'] = erecord[key]
elif '.init.name' in key:
params['args.name'] = erecord[key]
# Save all method call arguments
if self.save_args or save_args:
params['args.candidate'] = crecord
for ii, vrecord in enumerate(vrecords):
params['args.voter-%s' % ii] = vrecord
params['args.election'] = erecord
params = utilities.misc.flatten_dict(params, sep='.')
return params
@utilities.lazy_property
def output_docs(self) -> dict:
"""Retrieve output documentation."""
docs = self._electionStats.get_docs()
docs = utilities.misc.flatten_dict(docs, sep='.')
return docs
@property
def _electionStats(self) -> metrics.ElectionStats:
return self.election.electionStats
def dataseries(self, index=None):
"""Retrieve pandas data series of output data."""
if index is None:
return pd.Series(self.output)
else:
return | pd.Series(self._output_history[index]) | pandas.Series |
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps). This
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependant parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
Current implementation includes the alpha - delta strains.
If the class is initialized without arguments, a logistic model fitted to prelevance data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prelavence data provided by Prof. <NAME>.
A logistic model fitted to prelevance data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
Alpha, Beta, Gamma prelevance dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of consired spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_first_dose" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_first_dose : int
Number of doses administered per day. Default is 30000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for non-spatial multi-vaccindation dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionnary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependant social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = | pd.Timestamp('2021-07-01') | pandas.Timestamp |
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from datetime import datetime
import seaborn as sns
import matplotlib.dates as dates
import calendar
from itertools import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
def savePlots(loc, plt):
plt.savefig(loc)
event_colors = {'CommitCommentEvent':'#e59400',
'CreateEvent':'#B2912F',
'DeleteEvent':'#B276B2',
'ForkEvent':'#4D4D4D',
'IssueCommentEvent':'#DECF3F',
'IssuesEvent':'#60BD68',
'PullRequestEvent':'#5DA5DA',
'PullRequestReviewCommentEvent':'#D3D3D3',
'PushEvent':'#F17CB0',
'WatchEvent':'#F15854'}
def plot_histogram(data,xlabel,ylabel,title, log=False, loc=False):
sns.set_style('whitegrid')
sns.set_context('talk')
##ploting Histogram
_,bins = np.histogram(data,bins='doane')
measurement = pd.DataFrame(data)
measurement.plot(kind='hist',bins=bins,legend=False,cumulative=False,normed=False,log=log)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.tight_layout()
if loc != False:
savePlots(loc,plt)
return
return plt.show()
def plot_line_graph(data,xlabel,ylabel,title,labels="",loc=False):
sns.set_style('whitegrid')
sns.set_context('talk')
##plotting line graph
_,bins = np.histogram(data,bins='auto')
Watchmeasurement = | pd.DataFrame(data) | pandas.DataFrame |
# Standard library imports
from sqlalchemy.inspection import inspect
from datetime import datetime, timedelta
from pandas import isnull
# project imports
from PhosQuest_app.data_access.db_sessions import import_session_maker
from PhosQuest_app.data_access.class_functions import get_classes_key_attrs
# define null-type of values that are treated differently
NULL_VALS = [None, '', ' ', '-', 'nan', 'NaN']
def get_key_vals(df_to_class_dict, classes_keys, row):
"""
Gets the key values for the class instances in a data frame row.
:param df_to_class_dict: data frame heading to class & attribute (dict)
{'DF header': [(Class, 'class_attribute')]}
:param classes_keys: {Class: ['key_attr1', ...], ...} (dict)
:param row: pandas data frame row (df row)
:return: key values for the class instances in the row (dict)
{class: {key_attr: key_value, ...}, ...}
"""
# get keys for classes in row
# dictionary of class to primary key attributes and key values tuples
new_table_keys = {} # {class: {key_attr: key_value, ...}, ...}
# iterate through dict mapping df_heading: (Class, class_attr)
for df_heading, class_matches in df_to_class_dict.items():
for class_match in class_matches:
# df heading corresponds to class and class attribute
class_name = class_match[0]
class_attr = class_match[1]
# if the row contains a non-null value and the df heading
# contains a primary key, add key value to dict
if (class_attr in classes_keys[class_name]
and row[df_heading] not in NULL_VALS
and not | isnull(row[df_heading]) | pandas.isnull |
import pandas as pd
import numpy as np
import git
import os
import sys
from pathlib import Path
import matplotlib.pyplot as plt
#-- Setup paths
# Get parent directory using git
repo = git.Repo("./", search_parent_directories=True)
homedir = repo.working_dir
# Change working directory to parent directory
os.chdir(homedir)
# Add 'Dan' directory to the search path for imports
sys.path.append('Dan')
# Import our custom cube managing functions
import cube_formatter as cf
#-- Setup interactive matplotlib
#%matplotlib widget
#-- Control parameters
# Top N clusters to plot with the most deaths
# Set to -1 to plot all
plotN = 20
# Cluster fips to plot
# If isShowAllocations=True, all counties from the following cluster will be plotted
clst2Show = 80 # "FIPS" of cluster to show
# Data Manipulation flags (should match those used in creating submission file)
isComputeDaily = False # Flag to translate cummulative data to daily counts
#- Plot-type control flags
isCumul = True # Flag to denote that the plot should be cumulative, not daily deaths
# NOTE: the following two flags are independent of each other (ie. you can run either, or, or both)
isShowClusters = True # Flag to denote that each cluster should be plotted on its own
isShowAllocations = True # Flag to denote that the counties within clst2Show should be shown
# Key days (should match those used in creating the cube)
global_dayzero = | pd.to_datetime('2020 Jan 21') | pandas.to_datetime |
# IMAGE CLASSIFIER COMMAND LINE APPLICATION
# predict.py
#
# USAGE:
# python predict.py
# --data_dir Path to the folder of the flower images
# --save_dir Path to save the model checkpoints
# --path_to_image Path to an image file
# --category_names Path to JSON file containing category labels
# --to_device Run model on CPU or GPU
# --top_k_classes Top k most likely classes
#
# Some Example files for testing:
# /3/image_06634.jpg
# /7/image_07215.jpg
# /33/image_06460.jpg
# /71/image_04514.jpg
#
# PROGRAMMER: <NAME>
# DATE CREATED: February 14, 2019
# REVISED DATE:
# PURPOSE:
# Uses a trained network to predict the flower name of the input image.
# Receives a single file name
# Returns the flower name and top K most likely class probabilities
#
# Import modules
import torch
from torch import nn
from torch import optim
import json
import torchvision
from torchvision import datasets, transforms, models
import time
import numpy as np
import pandas as pd
import argparse
import os
from PIL import Image
# ***********************
# WRITE THE FOLLOWING
# ***********************
# ✔ load the checkpoint
# ✔ process image
# ✔ class prediction
# ✔ show_prediction
# ✔ command line args
def load_saved_checkpoint(model_path):
"""
loads a saved checkpoint and rebuilds the model
"""
saved_model = torch.load(model_path)
arch_name = saved_model['arch_name']
if (arch_name == 'densenet121'):
model = models.densenet121(pretrained=True)
else:
model = models.vgg16(pretrained=True)
model.classifier = saved_model['classifier']
criterion = nn.NLLLoss()
model.load_state_dict(saved_model['model_state'])
model.class_to_idx = saved_model['model_class_index']
optimizer = saved_model['optimizer_state']
epochs = saved_model['epochs']
for param in model.parameters():
param.requires_grad = False
return model, saved_model['model_class_index']
def process_image(image):
'''
Process a PIL image for use in a PyTorch model
Scales, crops, and normalizes a PIL image for a PyTorch model,
'''
img_mean = np.array([0.485, 0.456, 0.406])
img_std = np.array([0.229, 0.224, 0.225])
img = Image.open(image)
# just checking
width, height = img.size
# print('initial width={} height={}'.format(width, height))
# recommendation from Udacity reviewer
short_side = min(width, height)
img = img.resize( (int((width / short_side)*256), int((height / short_side)*256)) )
# just checking...again
width, height = img.size
print('resized width={} height={}'.format(width, height))
left = (width - 224) / 2
top = (height - 224) / 2
right = (width + 224) / 2
bottom = (height + 224) / 2
img = img.crop((left, top, right, bottom)) #crop out center
# make this a numpy array
img = np.array(img)
# RGB values are 8-bit: 0 to 255
# dividing by 255 gives us a range from 0.0 to 1.0
img = img / 255
img_norm = (img - img_mean) / img_std
img_norm = np.transpose(img_norm, (2, 0, 1))
return torch.Tensor(img_norm) # convert back to PyTorch tensor
def predict(image_path, model, to_device, topk):
"""
Predict the class (or classes) of an image using a trained deep learning model.
"""
# implement the code to predict the class from an image file
if (to_device == 'gpu'):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
device = 'cpu'
model.to(device)
model.eval() # inference mode
img = process_image(image_path)
# without this get an error about sizes not matching
# Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
img = img.to(device)
# not sure why unsqueeze is needed, something to do with the batch
# could not get answer from mentors or within student hub or knowledge
img = img.unsqueeze(0)
with torch.no_grad():
logits = model.forward(img)
# https://pytorch.org/docs/stable/torch.html#torch.topk
# returns the topk largest elements of the given input tensor
probs, probs_labels = torch.topk(logits, topk)
probs = probs.exp() # calc all exponential of all elements
class_to_idx = model.class_to_idx
# more errors, can't convert CUDA tensor to numpy.
# Use Tensor.cpu() to copy the tensor to host memory first.
# thanks for the suggestion!
probs = probs.cpu().numpy()
probs_labels = probs_labels.cpu().numpy()
# gets the indexes in numerical order: 0 to 101
classes_indexed = {model.class_to_idx[i]: i for i in model.class_to_idx}
# and still more errors - must be a list!
classes_list = list()
for label in probs_labels[0]:
classes_list.append(classes_indexed[label])
return (probs[0], classes_list)
def show_prediction(probs, classes, json_category_names):
"""
Display probabilites and name from the image
"""
with open(json_category_names, 'r') as f:
cat_to_name = json.load(f)
flower_names = [cat_to_name[i] for i in classes]
df = pd.DataFrame(
{'flowers': pd.Series(data=flower_names),
'probabilities': | pd.Series(data=probs, dtype='float64') | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # [Memanggil Library Pandas](https://academy.dqlab.id/main/livecode/178/346/1682)
# In[1]:
import pandas as pd
import numpy as np
# # [DataFrame & Series](https://academy.dqlab.id/main/livecode/178/346/1683)
# In[2]:
import pandas as pd
# Series
number_list = pd.Series([1, 2, 3, 4, 5, 6])
print("Series:")
print(number_list)
# DataFrame
matrix = [[1, 2, 3],
['a','b','c'],
[3, 4, 5],
['d',4,6]]
matrix_list = pd.DataFrame(matrix)
print("DataFrame:")
print(matrix_list)
# # [Atribut DataFrame & Series - Part 1](https://academy.dqlab.id/main/livecode/178/346/1684)
# In[3]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [1] attribute .info()
print("[1] attribute .info()")
print(matrix_list.info())
# [2] attribute .shape
print("\n[2] attribute .shape")
print(" Shape dari number_list:", number_list.shape)
print(" Shape dari matrix_list:", matrix_list.shape)
# [3] attribute .dtypes
print("\n[3] attribute .dtypes")
print(" Tipe data number_list:", number_list.dtypes)
print(" Tipe data matrix_list:", matrix_list.dtypes)
# [4] attribute .astype()
print("\n[4] attribute .astype()")
print(" Konversi number_list ke str:", number_list.astype("str"))
print(" Konversi matrix_list ke str:", matrix_list.astype("str"))
# # [Atribut DataFrame & Series - Part 2](https://academy.dqlab.id/main/livecode/178/346/1685)
# In[4]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [5] attribute .copy()
print("[5] attribute .copy()")
num_list = number_list.copy()
print(" Copy number_list ke num_list:", num_list)
mtr_list = matrix_list.copy()
print(" Copy matrix_list ke mtr_list:", mtr_list)
# [6] attribute .to_list()
print("[6] attribute .to_list()")
print(number_list.to_list())
# [7] attribute .unique()
print("[7] attribute .unique()")
print(number_list.unique())
# # [Atribut DataFrame & Series - Part 3](https://academy.dqlab.id/main/livecode/178/346/1686)
# In[5]:
import pandas as pd
# Series
number_list = pd.Series([1,2,3,4,5,6])
# DataFrame
matrix_list = pd.DataFrame([[1,2,3],
['a','b','c'],
[3,4,5],
['d',4,6]])
# [8] attribute .index
print("[8] attribute .index")
print(" Index number_list:", number_list.index)
print(" Index matrix_list:", matrix_list.index)
# [9] attribute .columns
print("[9] attribute .columns")
print(" Column matrix_list:", matrix_list.columns)
# [10] attribute .loc
print("[10] attribute .loc")
print(" .loc[0:1] pada number_list:", number_list.loc[0:1])
print(" .loc[0:1] pada matrix_list:", matrix_list.loc[0:1])
# [11] attribute .iloc
print("[11] attribute .iloc")
print(" iloc[0:1] pada number_list:", number_list.iloc[0:1])
print(" iloc[0:1] pada matrix_list:", matrix_list.iloc[0:1])
# # [Creating Series & Dataframe from List](https://academy.dqlab.id/main/livecode/178/346/1688)
# In[6]:
import pandas as pd
# Creating series from list
ex_list = ['a',1,3,5,'c','d']
ex_series = pd.Series(ex_list)
print(ex_series)
# Creating dataframe from list of list
ex_list_of_list = [[1, 'a', 'b', 'c'],
[2.5, 'd', 'e', 'f'],
[5, 'g', 'h', 'i'],
[7.5, 'j', 10.5, 'l']]
index = ['dq', 'lab', 'kar', 'lan']
cols = ['float', 'char', 'obj', 'char']
ex_df = pd.DataFrame(ex_list_of_list, index=index, columns=cols)
print(ex_df)
# # [Creating Series & Dataframe from Dictionary](https://academy.dqlab.id/main/livecode/178/346/1689)
# In[7]:
import pandas as pd
# Creating series from dictionary
dict_series = {'1':'a',
'2':'b',
'3':'c'}
ex_series = pd.Series(dict_series)
print(ex_series)
# Creating dataframe from dictionary
df_series = {'1':['a','b','c'],
'2':['b','c','d'],
'4':[2,3,'z']}
ex_df = pd.DataFrame(df_series)
print(ex_df)
# # [Creating Series & Dataframe from Numpy Array](https://academy.dqlab.id/main/livecode/178/346/1690)
# In[9]:
# import pandas as pd
import numpy as np
# Creating series from numpy array (1D)
arr_series = np.array([1,2,3,4,5,6,6,7])
ex_series = pd.Series(arr_series)
print(ex_series)
# Creating dataframe from numpy array (2D)
arr_df = np.array([[1, 2, 3, 5],
[5, 6, 7, 8],
['a','b','c',10]])
ex_df = pd.DataFrame(arr_df)
print(ex_df)
# # [Read Dataset - CSV dan TSV](https://academy.dqlab.id/main/livecode/178/347/1694)
# In[10]:
import pandas as pd
# File CSV
df_csv = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
print(df_csv.head(3)) # Menampilkan 3 data teratas
# File TSV
df_tsv = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep='\t')
print(df_tsv.head(3)) # Menampilkan 3 data teratas
# # [Read Dataset - Excel](https://academy.dqlab.id/main/livecode/178/347/1695)
# In[11]:
import pandas as pd
# File xlsx dengan data di sheet "test"
df_excel = pd.read_excel("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_excel.xlsx", sheet_name="test")
print(df_excel.head(4)) # Menampilkan 4 data teratas
# # [Read Dataset - JSON](https://academy.dqlab.id/main/livecode/178/347/1698)
# In[13]:
import pandas as pd
# File JSON
url = "https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/covid2019-api-herokuapp-v2.json"
df_json = pd.read_json(url)
print(df_json.head(10)) # Menampilkan 10 data teratas
# # [Head & Tail](https://academy.dqlab.id/main/livecode/178/347/2143)
# In[14]:
import pandas as pd
# Baca file sample_csv.csv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_csv.csv")
# Tampilkan 3 data teratas
print("Tiga data teratas:\n", df.head(3))
# Tampilkan 3 data terbawah
print("Tiga data terbawah:\n", df.tail(3))
# # [Indexing - Part 2](https://academy.dqlab.id/main/livecode/178/429/2133)
# In[15]:
import pandas as pd
# Baca file TSV sample_tsv.tsv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t")
# Index dari df
print("Index:", df.index)
# Column dari df
print("Columns:", df.columns)
# # [Indexing - Part 3](https://academy.dqlab.id/main/livecode/178/429/2134)
# In[16]:
import pandas as pd
# Baca file TSV sample_tsv.tsv
df = pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t")
# Set multi index df
df_x = df.set_index(['order_date', 'city', 'customer_id'])
# Print nama dan level dari multi index
for name, level in zip(df_x.index.names, df_x.index.levels):
print(name,':',level)
# # [Indexing - Part 4](https://academy.dqlab.id/main/livecode/178/429/2135)
# In[17]:
import pandas as pd
# Baca file sample_tsv.tsv untuk 10 baris pertama saja
df = | pd.read_csv("https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/sample_tsv.tsv", sep="\t", nrows=10) | pandas.read_csv |
# import modules ----------------------
import nba_py
import nba_py.game
import nba_py.player
import nba_py.team
import pandas as pd
import numpy as np
import datetime
import pytz
old_settings = np.seterr(all='print')
np.geterr()
print('modules imported')
# define functions ----------------------
def get_games(date):
"""
:param date: datetime.date, the match day
:return: df, all the games on the given day
"""
return nba_py.Scoreboard(month=date.month,
day=date.day,
year=date.year,
league_id='00',
offset=0).game_header()[['GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]
def get_players(games, all_players):
"""
:param games: df, some games
:param all_players: df, all players list of this season
:return: df, all players of the given games
"""
home_team_player = all_players[all_players['TEAM_ID'].isin(games['HOME_TEAM_ID'])][['PERSON_ID', 'TEAM_ID']]
home_team_player['Location'] = 'HOME'
away_team_player = all_players[all_players['TEAM_ID'].isin(games['VISITOR_TEAM_ID'])][['PERSON_ID', 'TEAM_ID']]
away_team_player['Location'] = 'AWAY'
players = pd.concat([home_team_player, away_team_player])
game_team = pd.concat([games[['HOME_TEAM_ID', 'GAME_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID'}),
games[['VISITOR_TEAM_ID', 'GAME_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID'})])
players = pd.merge(players, game_team, on='TEAM_ID')
team_team = pd.concat(
[games[['HOME_TEAM_ID', 'VISITOR_TEAM_ID']].rename(columns={'HOME_TEAM_ID': 'TEAM_ID',
'VISITOR_TEAM_ID': 'Against_Team_ID'}),
games[['VISITOR_TEAM_ID', 'HOME_TEAM_ID']].rename(columns={'VISITOR_TEAM_ID': 'TEAM_ID',
'HOME_TEAM_ID': 'Against_Team_ID'})])
players = pd.merge(players, team_team, on='TEAM_ID')
players = | pd.merge(players, all_players[['PERSON_ID', 'DISPLAY_FIRST_LAST', 'TEAM_ABBREVIATION']], on='PERSON_ID') | pandas.merge |
"""
Attribution
"""
import datetime
import pandas as pd
import numpy as np
import win32com.client
import matplotlib
import matplotlib.pyplot as plt
import attribution.extraction
from dateutil.relativedelta import relativedelta
start_date = datetime.datetime(2020, 1, 31)
end_date = datetime.datetime(2020, 3, 31)
input_directory = 'U:/CIO/#Investment_Report/Data/input/'
output_directory = 'U:/CIO/#Attribution/tables/base/'
table_filename = 'link_2019-12-31.csv'
returns_filename = 'returns_2020-03-31_attribution.csv'
market_values_filename = 'market_values_2020-03-31_attribution.csv'
asset_allocations_filename = 'asset_allocations_2020-03-31.csv'
latex_summary1_column_names = ['Returns', 'High Growth', "Bal' Growth", 'Balanced', 'Conservative', 'Growth', "Emp' Reserve"]
latex_summary2_column_names = ['Attribution', 'High Growth', "Bal' Growth", 'Balanced', 'Conservative', 'Growth', "Emp' Reserve"]
latex_column_names = ['Asset Class', 'High Growth', "Bal' Growth", 'Balanced', 'Conservative', 'Growth', "Emp' Reserve"]
# Creates variable names for linked table
periods = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month) + 1
market_value = str(periods) + '_market_value'
r_portfolio = str(periods) + '_r_portfolio'
r_benchmark = str(periods) + '_r_benchmark'
r_excess = str(periods) + '_r_excess'
r_diff = str(periods) + '_r_diff'
r_diff_sq = str(periods) + '_r_diff_sq'
r_active_contribution = str(periods) + '_r_active_contribution'
w_portfolio = str(periods) + '_w_portfolio'
w_benchmark = str(periods) + '_w_benchmark'
AA = str(periods) + '_AA'
SS = str(periods) + '_SS'
interaction = str(periods) + '_interaction'
total_effect = str(periods) + '_total_effect'
residual = str(periods) + '_residual'
total = str(periods) + '_total'
# Loads table
df_table = attribution.extraction.load_table(input_directory + 'link/' + table_filename)
# Loads returns
df_returns = attribution.extraction.load_returns(input_directory + 'returns/' + returns_filename)
# Reshapes returns dataframe from wide to long
df_returns = df_returns.transpose().reset_index(drop=False).rename(columns={'index': 'Manager'})
df_returns = pd.melt(df_returns, id_vars=['Manager'], value_name='1_r')
# Selects returns for this month or within a date_range
df_returns = df_returns[(df_returns['Date'] >= start_date) & (df_returns['Date'] <= end_date)].reset_index(drop=True)
df_benchmarks = pd.merge(
left=df_returns,
right=df_table,
left_on=['Manager'],
right_on=['Associated Benchmark'],
how='inner'
)
df_benchmarks = df_benchmarks[['Date', 'Associated Benchmark', '1_r', 'ModelCode']]
df_benchmarks.columns = ['Date', 'Benchmark Name', 'bmk_1_r', 'ModelCode']
df_returns_benchmarks = pd.merge(
left=df_returns,
right=df_benchmarks,
left_on=['Date', 'Manager'],
right_on=['Date', 'ModelCode'],
how='inner'
)
# Loads market values
df_market_values = attribution.extraction.load_market_values(input_directory + 'market_values/' + market_values_filename)
# Reshapes market values dataframe from wide to long
df_market_values = df_market_values.transpose().reset_index(drop=False).rename(columns={'index': 'Manager'})
df_market_values = | pd.melt(df_market_values, id_vars=['Manager'], value_name='Market Value') | pandas.melt |
import os
import time
from datetime import timedelta
import pandas as pd
import pytest
from peakina.cache import InMemoryCache
from peakina.datasource import DataSource, read_pandas
from peakina.helpers import TypeEnum
from peakina.io import MatchEnum
@pytest.fixture
def read_csv_spy(mocker):
read_csv = mocker.spy(pd, "read_csv")
# need to mock the validation as the signature is changed via the spy
mocker.patch("peakina.datasource.validate_kwargs", return_value=True)
return read_csv
def test_scheme():
"""It should be able to set scheme"""
assert DataSource("my/local/path/file.csv").scheme == ""
assert DataSource("ftp://remote/path/file.csv").scheme == "ftp"
with pytest.raises(AttributeError) as e:
DataSource("pika://wtf/did/I/write")
assert str(e.value) == "Invalid scheme 'pika'"
def test_type():
"""It should be able to set type if possible"""
assert DataSource("myfile.csv").type is TypeEnum.CSV
with pytest.raises(ValueError):
DataSource("myfile.csv$")
assert DataSource("myfile.tsv$", match=MatchEnum.GLOB).type is TypeEnum.CSV
assert DataSource("myfile.*", match=MatchEnum.GLOB).type is None
def test_validation_kwargs(mocker):
"""It should be able to validate the extra kwargs"""
validatation_kwargs = mocker.patch("peakina.datasource.validate_kwargs")
DataSource("myfile.csv")
validatation_kwargs.assert_called_once_with({}, "csv")
validatation_kwargs.reset_mock()
DataSource("myfile.*", match=MatchEnum.GLOB)
validatation_kwargs.assert_called_once_with({}, None)
validatation_kwargs.reset_mock()
def test_csv_with_sep(path):
"""It should be able to detect separator if not set"""
ds = DataSource(path("0_0_sep.csv"))
assert ds.get_df().shape == (2, 2)
ds = DataSource(path("0_0_sep.csv"), reader_kwargs={"skipfooter": 1, "engine": "python"})
assert ds.get_df().shape == (1, 2)
assert ds.get_df().to_dict(orient="records") == [{"a": 0, "b": 0}]
ds = DataSource(path("0_0_sep.csv"), reader_kwargs={"sep": ","})
assert ds.get_df().shape == (2, 1)
def test_csv_with_encoding(path):
"""It should be able to detect the encoding if not set"""
df = DataSource(path("latin_1.csv")).get_df()
assert df.shape == (2, 7)
assert "unité économique" in df.columns
def test_csv_with_trailing_newline(path):
"""It should not count last empty line"""
meta = DataSource(path("trailing_newline.csv")).get_metadata()
assert meta["total_rows"] == 2
def test_csv_default_encoding(path):
"""We should set `None` as default encoding for pandas readers"""
df = DataSource(path("pika.csv")).get_df()
assert df.shape == (486, 19)
def test_csv_western_encoding(path):
"""
It should be able to use a specific encoding
"""
ds = DataSource(path("encoded_western_short.csv"), reader_kwargs={"encoding": "windows-1252"})
df = ds.get_df()
assert df.shape == (2, 19)
df_meta = ds.get_metadata()
assert df_meta == {"df_rows": 2, "total_rows": 2}
# with CLRF line-endings
ds = DataSource(
path("encoded_western_clrf_short.csv"), reader_kwargs={"encoding": "windows-1252"}
)
df = ds.get_df()
assert df.shape == (2, 19)
df_meta = ds.get_metadata()
assert df_meta == {"df_rows": 2, "total_rows": 2}
# Encoding auto-detection
ds = DataSource(path("encoded_western_short.csv"))
df = ds.get_df()
assert df.shape == (2, 19)
df_meta = ds.get_metadata()
assert df_meta == {"df_rows": 2, "total_rows": 2}
def test_csv_header_row(path):
"""
Total number of rows must not include the header rows
"""
# Without header
ds_file_without_header = DataSource(path("0_0.csv"), reader_kwargs={"names": ["colA", "colB"]})
assert ds_file_without_header.get_df().shape == (3, 2)
meta = ds_file_without_header.get_metadata()
assert meta["total_rows"] == 3
assert meta["df_rows"] == 3
# With header
ds_file_with_header = DataSource(path("0_0.csv"))
assert ds_file_with_header.get_df().shape == (2, 2)
meta = ds_file_with_header.get_metadata()
assert meta["total_rows"] == 2
assert meta["df_rows"] == 2
def test_csv_with_sep_and_encoding(path):
"""It should be able to detect everything"""
ds = DataSource(path("latin_1_sep.csv"))
assert ds.get_df().shape == (2, 7)
def test_read_pandas(path):
"""It should be able to detect everything with read_pandas shortcut"""
assert read_pandas(path("latin_1_sep.csv")).shape == (2, 7)
def test_read_pandas_excel(path):
"""It should be able to detect everything with read_pandas shortcut"""
assert read_pandas(path("0_2.xls"), keep_default_na=False).shape == (2, 2)
df = read_pandas(path("0_2.xls"), skipfooter=1)
assert df.shape == (1, 2)
assert df.to_dict(orient="records") == [{"a": 3, "b": 4}]
def test_match(path):
"""It should be able to concat files matching a pattern"""
ds = DataSource(path(r"0_\d.csv"), match=MatchEnum.REGEX)
df = ds.get_df()
assert set(df["__filename__"]) == {"0_0.csv", "0_1.csv"}
assert df.shape == (4, 3)
def test_match_different_file_types(path):
"""It should be able to match even different types, encodings or seps"""
ds = DataSource(path("0_*"), match=MatchEnum.GLOB)
df = ds.get_df()
assert set(df["__filename__"]) == {"0_0.csv", "0_0_sep.csv", "0_1.csv", "0_2.xls"}
assert df.shape == (8, 3)
@pytest.mark.flaky(reruns=5)
def test_ftp(ftp_path):
ds = DataSource(f"{ftp_path}/sales.csv")
assert ds.get_df().shape == (208, 15)
@pytest.mark.flaky(reruns=5)
def test_ftp_match(ftp_path):
ds = DataSource(f"{ftp_path}/my_data_\\d{{4}}\\.csv$", match=MatchEnum.REGEX)
assert ds.get_df().shape == (8, 3)
def test_s3(s3_endpoint_url):
dirpath = "s3://accessKey1:verySecretKey1@mybucket"
ds = DataSource(
f"{dirpath}/0_0.csv",
fetcher_kwargs={"client_kwargs": {"endpoint_url": s3_endpoint_url}},
)
assert ds.get_df().shape == (2, 2)
ds = DataSource(
f"{dirpath}/0_*.csv",
match=MatchEnum.GLOB,
fetcher_kwargs={"client_kwargs": {"endpoint_url": s3_endpoint_url}},
)
assert ds.get_df().shape == (4, 3)
# With subdirectories
ds = DataSource(
f"{dirpath}/mydir/0_*.csv",
match=MatchEnum.GLOB,
fetcher_kwargs={"client_kwargs": {"endpoint_url": s3_endpoint_url}},
)
assert ds.get_df().shape == (4, 3)
def test_basic_excel(path):
"""It should not add a __sheet__ column when retrieving a single sheet"""
ds = DataSource(path("fixture-multi-sheet.xlsx"))
df = pd.DataFrame({"Month": [1], "Year": [2019]})
assert ds.get_df().equals(df)
assert ds.get_metadata() == {
"df_rows": 1,
"sheetnames": ["January", "February"],
"total_rows": 4, # we have for rows as total here because january sheet has 1 row and February sheet has 3 (1 + 3)
}
# On match datasources, no metadata is returned:
assert DataSource(path("fixture-multi-sh*t.xlsx"), match=MatchEnum.GLOB).get_metadata() == {}
# test with skiprows
ds = DataSource(path("fixture-single-sheet.xlsx"), reader_kwargs={"skiprows": 2})
assert ds.get_df().shape == (0, 2)
# test with nrows and skiprows
ds = DataSource(path("fixture-single-sheet.xlsx"), reader_kwargs={"nrows": 1, "skiprows": 2})
assert ds.get_df().shape == (0, 2)
# test with skiprows and limit offset
ds = DataSource(
path("fixture-single-sheet.xlsx"),
reader_kwargs={"skiprows": 2, "preview_nrows": 1, "preview_offset": 0},
)
assert ds.get_df().shape == (0, 2)
# test with nrows and limit offset
ds = DataSource(
path("fixture-single-sheet.xlsx"),
reader_kwargs={"nrows": 1, "preview_nrows": 1, "preview_offset": 0},
)
assert ds.get_df().shape == (1, 2)
# test with the new file format type
ds = DataSource(
path("fixture_new_format.xls"), reader_kwargs={"preview_nrows": 1, "preview_offset": 2}
)
assert ds.get_df().shape == (1, 8)
# test with nrows
ds = DataSource(path("fixture_new_format.xls"), reader_kwargs={"nrows": 2})
assert ds.get_df().shape == (2, 8)
# test with skiprows
ds = DataSource(path("fixture_new_format.xls"), reader_kwargs={"skiprows": 2})
assert ds.get_df().shape == (7, 8)
# test with nrows and skiprows
ds = DataSource(path("fixture_new_format.xls"), reader_kwargs={"nrows": 1, "skiprows": 2})
assert ds.get_df().shape == (1, 8)
def test_multi_sheets_excel(path):
"""It should add a __sheet__ column when retrieving multiple sheet"""
ds = DataSource(path("fixture-multi-sheet.xlsx"), reader_kwargs={"sheet_name": None})
# because our excel file has 1 entry on January sheet and 3 entries in February sheet
df = pd.DataFrame(
{
"Month": [1, 2, 3, 4],
"Year": [2019, 2019, 2021, 2022],
"__sheet__": ["January", "February", "February", "February"],
}
)
assert ds.get_df().equals(df)
assert ds.get_metadata() == {
"df_rows": 4,
"sheetnames": ["January", "February"],
"total_rows": 4,
}
def test_basic_xml(path):
"""It should apply optional jq filter when extracting an xml datasource"""
# No jq filter -> everything is in one cell
assert DataSource(path("fixture.xml")).get_df().shape == (1, 1)
jq_filter = ".records"
ds = DataSource(path("fixture.xml"), reader_kwargs={"filter": jq_filter})
assert ds.get_df().shape == (2, 1)
jq_filter = '.records .record[] | .["@id"]|=tonumber'
ds = DataSource(path("fixture.xml"), reader_kwargs={"filter": jq_filter})
df = pd.DataFrame({"@id": [1, 2], "title": ["Keep on dancin'", "Small Talk"]})
assert ds.get_df().equals(df)
jq_filter = '.records .record[] | .["@id"]|=tonumber'
ds = DataSource(path("fixture.xml"), reader_kwargs={"filter": jq_filter, "preview_nrows": 1})
df = pd.DataFrame({"@id": [1], "title": ["Keep on dancin'"]})
assert ds.get_df().equals(df)
def test_basic_json(path):
"""It should apply optional jq filter when extracting a json datasource"""
# No jq filter -> everything is in one cell
assert DataSource(path("fixture.json")).get_df().shape == (1, 1)
jq_filter = '.records .record[] | .["@id"]|=tonumber'
ds = DataSource(path("fixture.json"), reader_kwargs={"filter": jq_filter, "lines": True})
df = pd.DataFrame({"@id": [1, 2], "title": ["Keep on dancin'", "Small Talk"]})
assert ds.get_df().equals(df)
ds = DataSource(
path("fixture.json"),
reader_kwargs={"filter": jq_filter, "lines": True, "preview_nrows": 1},
)
df = pd.DataFrame({"@id": [1], "title": ["Keep on dancin'"]})
assert ds.get_df().equals(df)
ds = DataSource(
path("fixture.json"),
reader_kwargs={"preview_nrows": 1},
)
assert ds.get_df().shape == (1, 1)
def test_basic_parquet(path):
"""It should open a basic parquet file"""
df = DataSource(path("userdata.parquet")).get_df()
assert df.shape == (1000, 13)
df = DataSource(
path("userdata.parquet"),
type=TypeEnum.PARQUET,
reader_kwargs={"columns": ["title", "country"]},
).get_df()
assert df.shape == (1000, 2)
def test_empty_file(path):
"""It should return an empty dataframe if the file is empty"""
assert DataSource(path("empty.csv")).get_df().equals( | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pickle
import csv
import glob
import errno
import re
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from keras.layers import Dense, Embedding, Dropout, Reshape, Merge, Input, LSTM, concatenate
from keras.layers import TimeDistributed
from keras.models import Sequential, Model
from keras.optimizers import Adam, Adamax
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import sequence
from keras.models import load_model
from keras import utils
class ModelDataSettings:
def __init__(self):
self.label_column = ''
self.label_type = ''
self.key_column = ''
self.category_columns = []
self.numeric_columns = []
self.sequence_columns = []
self.sequence_length = 10
self.sequence_pad = 'post'
self.value_index = {}
self.index_value = {}
self.imputers = {}
self.scalers = {}
class ModelData:
def __init__(self, input_data, settings_filename=''):
self.input_data = input_data
self.prep_data = pd.DataFrame()
self.training_features = []
self.training_labels = []
self.validation_features = []
self.validation_labels = []
self.label_column = ''
self.label_type = 'binary'
self.key_column = '' # id column from input
self.category_columns = [] #columns are either category or numeric but not both
self.numeric_columns = []
self.sequence_columns = [] # sequence columns are also in category or numeric column list
self.sequence_length = 1 # max/pad length of sequences for training - all sequential cols have same length
self.sequence_pad = 'post' # which end of sequence to pad (pre/post)
self.value_index = {} # dictionary of dictionaries - key is column name
self.index_value = {} # above only in reverse
self.validation_split = .10
self.max_validation = 100000
self.imputers = {}
self.scalers = {}
if settings_filename != '':
self.load_settings(settings_filename)
def save_settings(self, filename):
# save modeldata settings to a file including column names and tokenization maps
# to do inference later, you will need to tokenize data for the model using
# the same token maps as were used during model training
# do not include file extension in filename - .pkl will be added
settings = ModelDataSettings()
settings.label_column = self.label_column
settings.label_type = self.label_type
settings.key_column = self.key_column
settings.category_columns = self.category_columns
settings.numeric_columns = self.numeric_columns
settings.sequence_columns = self.sequence_columns
settings.sequence_length = self.sequence_length
settings.sequence_pad = self.sequence_pad
settings.value_index = self.value_index
settings.index_value = self.index_value
settings.imputers = self.imputers
settings.scalers = self.scalers
with open(filename + '.pkl', 'wb') as output:
pickle.dump(settings, output, pickle.HIGHEST_PROTOCOL)
def load_settings(self, filename):
# load settings from file
# do not include file extension in filename - .pkl will be added
with open(filename + '.pkl', 'rb') as input:
settings = pickle.load(input)
self.label_column = settings.label_column
self.label_type = settings.label_type
self.key_column = settings.key_column
self.category_columns = settings.category_columns
self.numeric_columns = settings.numeric_columns
self.sequence_columns = settings.sequence_columns
self.sequence_length = settings.sequence_length
self.sequence_pad = settings.sequence_pad
self.value_index = settings.value_index
self.index_value = settings.index_value
self.imputers = settings.imputers
self.scalers = settings.scalers
def write_csv(self, column_list, filename):
# write data from prep_data to csv file
self.prep_data[column_list].to_csv(filename, index=False, quoting=csv.QUOTE_NONNUMERIC)
def unique_column_values(self, data_series, is_sequence=False):
# return a list of all unique values in series/column
# if each value is actually a list of values, handle that
unique_values = []
if is_sequence:
seq = []
for r in data_series.iteritems():
for v in r[1]:
seq.append(v)
unique_values = unique_values + list(set(seq))
else:
unique_values = unique_values + list(set(data_series))
# change nan/null values to a string - because nan can't be used as a dictionary key
unique_values = ['nan' if x != x else x for x in unique_values]
return unique_values
def column_values_to_index(self, data_series, column_name, is_sequence=False):
# take values in one column and changes them all to their zero-based index equivalent
# if each value is actually a list of values, handle that
# return a list of converted values
index_list = []
if is_sequence: # create a list of lists
for l in data_series:
seq = []
for v in l:
if v in self.value_index[column_name]:
seq.append(self.value_index[column_name][v])
else:
seq.append(1) # unknown value
index_list.append(seq)
else:
for v in data_series:
if v in self.value_index[column_name]:
index_list.append(self.value_index[column_name][v])
else:
index_list.append(1)
return index_list
def add_false_rows(self, deface_columns, swap_sequence=False, percent_of_sequence=.15):
# add negative samples to training data by defacing specific columns with false values
# swap_sequence=True means swap the entire sequence with another row; False means swap % of values in sequence
#
# label the new rows false (0)
print('Adding false rows')
#self.prep_data[self.label_column] = 1 # label all true examples - dont assume this
dfTrue = self.prep_data[self.prep_data[self.label_column]==1]
dfFalse = dfTrue.copy(deep=True) # copy all true examples as a starting point (dont use pandas copy)
dfFalse[self.label_column] = 0 # label all false examples
for col_name in deface_columns:
dfFalse[col_name] = self.deface_column(dfFalse[col_name], col_name in self.sequence_columns and swap_sequence==False, percent_of_sequence)
# add false rows to training data
self.prep_data = | pd.concat([self.prep_data, dfFalse], ignore_index=True) | pandas.concat |
import pytest
import pandas as pd
from cr.sparse import io
import jax.numpy as jnp
def test_print_dataframe_as_list_table():
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"three": | pd.Series([1, 2, 3], index=["a", "b", "c"]) | pandas.Series |
from numpy import isnan
from pandas import read_csv, DataFrame
from sklearn.impute import SimpleImputer
# Load the data
df = read_csv('https://raw.githubusercontent.com/jbrownlee/Datasets/master/horse-colic.csv',
header=None,
na_values='?',)
# Show the first 5 rows of the data
df.head()
# Define X (Predictor variables) and y (Target variable)
dt = df.values
ix = [i for i in range(dt.shape[1]) if i != 27]
X, y = dt[:, ix], dt[:, 27]
# Show count of missing values of X (before imputation)
sum(isnan(X).flatten())
# Define imputer
imp = SimpleImputer(strategy='median')
# Fit and transform imputer on the dataset
Xtrans = imp.fit_transform(X)
# Show count of missing values of Xtrans (after imputation)
sum(isnan(Xtrans).flatten())
# Convert NumPy array to Pandas DataFrame
Xtrans = | DataFrame(data=Xtrans) | pandas.DataFrame |
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import os
import numpy as np
def get_file_paths(file_directory):
file_paths = os.listdir(file_directory)
file_paths = list(filter(lambda f_path: os.path.isdir(file_directory / f_path), file_paths))
return file_paths
def plot_day(plot_directory, df_phases_day, sdp_name, start_time, df_comparison_values, plot_method, comparison_label):
sdp_directory = plot_directory / sdp_name
if not os.path.exists(sdp_directory):
os.makedirs(sdp_directory)
plt.figure(1)
plt.ylabel('Phases')
p_counter = 1
relevant_plot = False
transgressions_sum = 0
for df_p_day in df_phases_day:
if not df_p_day.empty:
transgressions = plot_method(df_p_day, p_counter)
transgressions_sum += transgressions
relevant_plot = relevant_plot or transgressions > 0
p_counter = p_counter + 1
if relevant_plot and not df_comparison_values.empty:
df_comparison_values.plot(figsize=(24, 6), linewidth=0.5, color='grey', label=comparison_label)
if relevant_plot:
legend = plt.legend(fontsize='x-large', loc='lower left')
for line in legend.get_lines():
line.set_linewidth(4.0)
plot_path = plot_directory / sdp_name / start_time
if relevant_plot:
plt.savefig(plot_path)
plt.close(1)
if transgressions_sum > 0:
print(start_time)
print(transgressions_sum)
return transgressions_sum
def plot_pickle_daywise(pickle_directory, plot_directory, plot_method, comparison_series_func):
transgression_sum = 0
nmbr_elements_sum = 0
file_paths = get_file_paths(pickle_directory)
print(file_paths)
for path in file_paths:
print(path)
comparison_label, df_comparison_values = comparison_series_func(path)
# df_mean_values = pd.read_pickle(pickle_directory/(path+'season_aggregation')).sort_index()
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
nmbr_elements_sum += sum(map(lambda df: df.shape[0], df_phases))
day = pd.Timedelta('1d')
min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
print(min_date)
print(max_date)
for start_time in pd.date_range(min_date, max_date, freq='d'):
end_time = start_time + day
# df_day = df.loc[df.index>start_time and df.index<end_time, :]
df_phases_day = list(map(lambda df: df.loc[start_time:end_time], df_phases))
df_comparison_values_day = df_comparison_values.loc[start_time:end_time]
# print(start_time.date())
transgression_sum += plot_day(plot_directory, df_phases_day, path.name, str(start_time.date()),
df_comparison_values_day, plot_method, comparison_label)
return transgression_sum, nmbr_elements_sum
def plot_station_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("StationDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_station_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.StationDif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "meanStationAverage", pd.read_pickle(pickle_directory / 'meanStationValues')
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_station_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_phase_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("PhaseDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_station_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.phase_dif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_station_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_season_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
# anomaly_threshold = 3.2270145810536146
plot_directory = base_plot_directory / ("SeasDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_season_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.SeasDif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "meanSeasonalAverage", pd.read_pickle(
pickle_directory / (station_name + 'season_aggregation')).sort_index()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_season_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_trafo_dif_anomalies(pickle_directory, base_plot_directory):
anomaly_threshold = 1.5
plot_directory = base_plot_directory / ("TrafoDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_trafo_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.Value.diff()) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_trafo_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("TrafoDif_v2_" + str(anomaly_threshold).replace(".", "_"))
def plot_trafo_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.trafo) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_trafo_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_time_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("TimeDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_time_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.time_passed) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_time_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def get_quintiles(pickle_directory, quantile):
file_paths = get_file_paths(pickle_directory)
print(file_paths)
aggregated_series = pd.Series()
for path in file_paths:
print(path)
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
for df_p in df_phases:
ser = df_p.time_passed.reset_index(drop=True).abs()
aggregated_series = aggregated_series.append(ser, ignore_index=True)
threshold = aggregated_series.quantile(q=quantile)
print(threshold)
return threshold
def show_df2(pickle_name, pickle_dir=Path('pickles')):
path = pickle_dir / pickle_name
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
# df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[
pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['p2'] = df_phases_h[1][['Value']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['p3'] = df_phases_h[2][['Value']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['t1'] = df_phases_h[0][['trafo']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['t2'] = df_phases_h[1][['trafo']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['t3'] = df_phases_h[2][['trafo']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_dif = pd.DataFrame()
df_p_dif['p1'] = df_p_h['p1'].diff() / df_p_h['p1'].index.to_series().diff().dt.total_seconds()
df_p_dif['p2'] = df_p_h['p2'].diff() / df_p_h['p2'].index.to_series().diff().dt.total_seconds()
df_p_dif['p3'] = df_p_h['p3'].diff() / df_p_h['p3'].index.to_series().diff().dt.total_seconds()
df_p_dif_a = df_p_dif.loc[abs(df_p_dif['p1']) >= 0.15].loc[abs(df_p_dif['p2']) >= 0.15].loc[
abs(df_p_dif['p3']) >= 0.15]
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_p_dif_a)
print(df_p_h)
def show_df(pickle_name, pickle_dir=Path('pickles')):
path = pickle_dir / pickle_name
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
# df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[
pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_h['p2'] = df_phases_h[1][['Value']].loc[ | pd.datetime(2017, 8, 7) | pandas.datetime |
import numpy as np
import pandas as pd
import statsmodels.api as sm
tsa = sm.tsa # as shorthand
mdata = sm.datasets.macrodata.load().data
type(mdata)
endog = np.log(mdata['m1'])
exog = np.column_stack([np.log(mdata['realgdp']), np.log(mdata['cpi'])])
exog = sm.add_constant(exog, prepend=True)
exog
res1 = sm.OLS(endog, exog).fit()
acf, ci, Q, pvalue = tsa.acf(res1.resid, nlags=4,alpha=.05, qstat=True,unbiased=True)
acf
pvalue
tsa.pacf(res1.resid, nlags=4)
#==============================================================================
# FILTER
#==============================================================================
from scipy.signal import lfilter
data = sm.datasets.macrodata.load()
infl = data.data.infl[1:]
data.data.shape
# get 4 qtr moving average
infl = lfilter(np.ones(4)/4, 1, infl)[4:]
unemp = data.data.unemp[1:]
#To apply the Hodrick-Prescott filter to the data 3, we can do
infl_c, infl_t = tsa.filters.hpfilter(infl)
unemp_c, unemp_t = tsa.filters.hpfilter(unemp)
#The Baxter-King filter 4 is applied as
infl_c = tsa.filters.bkfilter(infl)
unemp_c = tsa.filters.bkfilter(unemp)
#The Christiano-Fitzgerald filter is similarly applied 5
infl_c, infl_t = tsa.filters.cfilter(infl)
unemp_c, unemp_t = tsa.filters.cfilter(unemp)
#plot
INFLA=pd.DataFrame(infl_c,columns=['INFLA'])
UNEMP=pd.DataFrame(unemp_c[4:],columns=['UNEMP'])
pd.concat([INFLA,UNEMP],axis=1).plot()
INFLA=pd.DataFrame(infl_t,columns=['INFLA'])
UNEMP=pd.DataFrame(unemp_t[4:],columns=['UNEMP'])
pd.concat([INFLA,UNEMP],axis=1).plot()
#==============================================================================
# BENCHMARKING TO STANDARDISE LOWER FREQ TO HIGHER FREQ
#==============================================================================
iprod_m = np.array([ 87.4510, 86.9878, 85.5359, #INDUSTRIAL PRODUCTION INDEX
84.7761, 83.8658, 83.5261, 84.4347,
85.2174, 85.7983, 86.0163, 86.2137,
86.7197, 87.7492, 87.9129, 88.3915,
88.7051, 89.9025, 89.9970, 90.7919,
90.9898, 91.2427, 91.1385, 91.4039,
92.5646])
gdp_q = np.array([14049.7, 14034.5, 14114.7,14277.3, 14446.4, 14578.7, 14745.1,14871.4])
gdp_m = tsa.interp.dentonm(iprod_m, gdp_q,freq="qm")
a=[]
[a.extend([i]*4) for i in gdp_q]
x= | pd.DataFrame([iprod_m,gdp_m],index=['IPROD','GDP MONTHLY']) | pandas.DataFrame |
from collections import namedtuple
import numpy as np
import pandas as pd
import pytest
import statsmodels.api as sm
from estimagic.config import EXAMPLE_DIR
from estimagic.visualization.estimation_table import _apply_number_format
from estimagic.visualization.estimation_table import _check_order_of_model_names
from estimagic.visualization.estimation_table import _convert_frame_to_string_series
from estimagic.visualization.estimation_table import _create_group_to_col_position
from estimagic.visualization.estimation_table import _create_statistics_sr
from estimagic.visualization.estimation_table import _customize_col_groups
from estimagic.visualization.estimation_table import _customize_col_names
from estimagic.visualization.estimation_table import (
_get_default_column_names_and_groups,
)
from estimagic.visualization.estimation_table import _get_digits_after_decimal
from estimagic.visualization.estimation_table import _get_model_names
from estimagic.visualization.estimation_table import (
_get_params_frames_with_common_index,
)
from estimagic.visualization.estimation_table import _process_frame_indices
from estimagic.visualization.estimation_table import _process_model
from estimagic.visualization.estimation_table import estimation_table
from estimagic.visualization.estimation_table import render_html
from estimagic.visualization.estimation_table import render_latex
from pandas.testing import assert_frame_equal as afe
from pandas.testing import assert_series_equal as ase
from tests.visualization.helpers_test_estimation_table import (
_get_models_multiindex,
)
from tests.visualization.helpers_test_estimation_table import (
_get_models_multiindex_multi_column,
)
from tests.visualization.helpers_test_estimation_table import (
_get_models_single_index,
)
from tests.visualization.helpers_test_estimation_table import _read_csv_string
# test process_model for different model types
ProcessedModel = namedtuple("ProcessedModel", "params info name")
fix_path = EXAMPLE_DIR / "diabetes.csv"
df_ = pd.read_csv(fix_path, index_col=0)
est = sm.OLS(endog=df_["target"], exog=sm.add_constant(df_[df_.columns[0:4]])).fit()
est1 = sm.OLS(endog=df_["target"], exog=sm.add_constant(df_[df_.columns[0:5]])).fit()
def test_estimation_table():
models = [est]
res = estimation_table(models, return_type="render_inputs", append_notes=False)
exp = {}
body = """
index,target
const,152.00$^{*** }$
,(2.85)
Age,37.20$^{ }$
,(64.10)
Sex,-107.00$^{* }$
,(62.10)
BMI,787.00$^{*** }$
,(65.40)
ABP,417.00$^{*** }$
,(69.50)
"""
exp["body"] = _read_csv_string(body).fillna("")
exp["body"].set_index("index", inplace=True)
footer_str = """
,target
R$^2$,0.40
Adj. R$^2$,0.40
Residual Std. Error,60.00
F Statistic,72.90$^{***}$
Observations,442
"""
exp["footer"] = _read_csv_string(footer_str).fillna("")
exp["footer"].set_index(" ", inplace=True)
exp["footer"].index.names = [None]
exp["footer"].index = pd.MultiIndex.from_arrays([exp["footer"].index])
exp["notes_tex"] = "\\midrule\n"
exp[
"notes_html"
] = """<tr><td colspan="2" style="border-bottom: 1px solid black">
</td></tr>"""
afe(exp["footer"], res["footer"])
afe(exp["body"], res["body"], check_index_type=False)
MODELS = [
_get_models_multiindex(),
_get_models_single_index(),
_get_models_multiindex_multi_column(),
]
PARAMETRIZATION = [("latex", render_latex, models) for models in MODELS]
PARAMETRIZATION += [("html", render_html, models) for models in MODELS]
@pytest.mark.parametrize("return_type, render_func,models", PARAMETRIZATION)
def test_one_and_stage_rendering_are_equal(return_type, render_func, models):
first_stage = estimation_table(
models, return_type="render_inputs", confidence_intervals=True
)
second_stage = render_func(
siunitx_warning=False, alignment_warning=False, **first_stage
)
one_stage = estimation_table(
models,
return_type=return_type,
siunitx_warning=False,
alignment_warning=False,
confidence_intervals=True,
)
assert one_stage == second_stage
def test_process_model_namedtuple():
# checks that process_model doesn't alter values
df = pd.DataFrame(columns=["value", "p_value", "ci_lower", "ci_upper"])
df["value"] = np.arange(10)
df["p_value"] = np.arange(10)
df["ci_lower"] = np.arange(10)
df["ci_upper"] = np.arange(10)
info = {"stat1": 0, "stat2": 0}
name = "model_name"
model = ProcessedModel(params=df, info=info, name=name)
res = _process_model(model)
afe(res.params, df)
ase(pd.Series(res.info), pd.Series(info))
assert name == res.name
def test_process_model_stats_model():
params = pd.DataFrame(
columns=["value", "p_value", "standard_error", "ci_lower", "ci_upper"],
index=["const", "Age", "Sex", "BMI", "ABP"],
)
params["value"] = [152.133484, 37.241211, -106.577520, 787.179313, 416.673772]
params["p_value"] = [
2.048808e-193,
5.616557e-01,
8.695658e-02,
5.345260e-29,
4.245663e-09,
]
params["standard_error"] = [2.852749, 64.117433, 62.125062, 65.424126, 69.494666]
params["ci_lower"] = [146.526671, -88.775663, -228.678572, 658.594255, 280.088446]
params["ci_upper"] = [157.740298, 163.258084, 15.523532, 915.764371, 553.259097]
info = {}
info["rsquared"] = 0.40026108237714
info["rsquared_adj"] = 0.39477148130050055
info["fvalue"] = 72.91259907398705
info["f_pvalue"] = 2.700722880950139e-47
info["df_model"] = 4.0
info["df_resid"] = 437.0
info["resid_std_err"] = 59.97560860753488
info["n_obs"] = 442.0
res = _process_model(est)
afe(res.params, params)
ase(pd.Series(res.info), pd.Series(info))
assert res.name == "target"
def test_process_model_dict():
df = pd.DataFrame(columns=["value", "p_value", "standard_error"])
df["value"] = np.arange(10)
df["p_value"] = np.arange(10)
df["standard_error"] = np.arange(10)
info = {"stat1": 0, "stat2": 0}
mod = {}
mod["params"] = df
mod["info"] = info
res = _process_model(mod)
afe(res.params, mod["params"])
ase(pd.Series(res.info), pd.Series(mod["info"]))
# test convert_model_to_series for different arguments
def test_convert_model_to_series_with_ci():
df = pd.DataFrame(
np.array(
[[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3], [1.2, 3.3, 4.33]]
).T,
columns=["value", "p_value", "ci_lower", "ci_upper"],
index=["a", "b", "c"],
).astype("str")
df["p_value"] = df["p_value"].astype("float")
significance_levels = [0.1, 0.05, 0.01]
show_stars = True
res = _convert_frame_to_string_series(df, significance_levels, show_stars)
exp = pd.Series(
[
"0.6$^{ }$",
r"(0.6;1.2)",
"2.3$^{** }$",
r"(2.3;3.3)",
"3.3$^{*** }$",
r"(3.3;4.33)",
],
index=["a", "", "b", "", "c", ""],
name="",
)
exp.index.name = "index"
ase(exp, res)
def test_convert_model_to_series_with_se():
df = pd.DataFrame(
np.array([[0.6, 2.3, 3.3], [0.11, 0.049, 0.009], [0.6, 2.3, 3.3]]).T,
columns=["value", "p_value", "standard_error"],
index=["a", "b", "c"],
).astype("str")
df["p_value"] = df["p_value"].astype("float")
significance_levels = [0.1, 0.05, 0.01]
show_stars = True
res = _convert_frame_to_string_series(df, significance_levels, show_stars)
exp = pd.Series(
["0.6$^{ }$", "(0.6)", "2.3$^{** }$", "(2.3)", "3.3$^{*** }$", "(3.3)"],
index=["a", "", "b", "", "c", ""],
name="",
)
exp.index.name = "index"
ase(exp, res)
def test_convert_model_to_series_without_inference():
df = pd.DataFrame(
np.array([[0.6, 2.3, 3.3], [0.11, 0.049, 0.009]]).T,
columns=["value", "p_value"],
index=["a", "b", "c"],
).astype("str")
df["p_value"] = df["p_value"].astype("float")
significance_levels = [0.1, 0.05, 0.01]
show_stars = True
res = _convert_frame_to_string_series(df, significance_levels, show_stars)
exp = pd.Series(
["0.6$^{ }$", "2.3$^{** }$", "3.3$^{*** }$"], index=["a", "b", "c"], name=""
)
ase(exp, res)
# test create stat series
def test_create_statistics_sr():
df = pd.DataFrame(np.empty((10, 3)), columns=["a", "b", "c"])
df.index = pd.MultiIndex.from_arrays(np.array([np.arange(10), np.arange(10)]))
info = {"rsquared": 0.45, "n_obs": 400, "rsquared_adj": 0.0002}
number_format = ("{0:.3g}", "{0:.5f}", "{0:.4g}")
add_trailing_zeros = True
sig_levels = [0.1, 0.2]
show_stars = False
model = ProcessedModel(params=df, info=info, name="target")
stats_options = {
"n_obs": "Observations",
"rsquared": "R2",
"rsquared_adj": "R2 Adj.",
}
res = _create_statistics_sr(
model,
stats_options,
sig_levels,
show_stars,
number_format,
add_trailing_zeros,
max_trail=4,
)
exp = pd.Series(["0.4500", "0.0002", "400"])
exp.index = pd.MultiIndex.from_arrays(
np.array([np.array(["R2", "R2 Adj.", "Observations"]), np.array(["", "", ""])])
)
ase(exp, res)
# test _process_frame_axes for different arguments
def test_process_frame_indices_index():
df = pd.DataFrame(np.ones((3, 3)), columns=["", "", ""])
df.index = pd.MultiIndex.from_arrays(
np.array([["today", "today", "today"], ["var1", "var2", "var3"]])
)
df.index.names = ["l1", "l2"]
par_name_map = {"today": "tomorrow", "var1": "1stvar"}
index_name_map = ["period", "variable"]
column_names = list("abc")
res = _process_frame_indices(
df,
custom_param_names=par_name_map,
custom_index_names=index_name_map,
column_names=column_names,
show_col_names=True,
show_col_groups=False,
column_groups=None,
)
# expected:
params = """
period,variable,a,b,c
tomorrow,1stvar,1,1,1
tomorrow,var2,1,1,1
tomorrow,var3,1,1,1
"""
exp = _read_csv_string(params).fillna("")
exp.set_index(["period", "variable"], inplace=True)
afe(res, exp, check_dtype=False)
def test_process_frame_indices_columns():
df = pd.DataFrame(np.ones((3, 3)), columns=["", "", ""])
col_names = list("abc")
col_groups = ["first", "first", "second"]
res = _process_frame_indices(
df=df,
custom_index_names=None,
custom_param_names=None,
show_col_groups=True,
show_col_names=True,
column_names=col_names,
column_groups=col_groups,
)
arrays = [np.array(col_groups), np.array(col_names)]
exp = pd.DataFrame(data=np.ones((3, 3)), columns=arrays)
afe(res, exp, check_dtype=False)
def test_apply_number_format_tuple():
number_format = ("{0:.2g}", "{0:.2f}", "{0:.2g}")
raw = pd.DataFrame(data=[1234.2332, 0.0001])
exp = pd.DataFrame(data=["1.2e+03", "0"])
res = _apply_number_format(df=raw, number_format=number_format)
afe(exp, res)
def test_apply_number_format_int():
number_format = 3
raw = pd.DataFrame(data=["1234.2332", "1.2e+03"])
exp = pd.DataFrame(data=["1234.233", "1.2e+03"])
res = _apply_number_format(df=raw, number_format=number_format)
afe(exp, res)
def test_apply_number_format_callable():
def nsf(num, n=3):
"""n-Significant Figures"""
numstr = ("{0:.%ie}" % (n - 1)).format(num)
return numstr
raw = pd.DataFrame(data=[1234.2332, 0.0001])
exp = pd.DataFrame(data=["1.23e+03", "1.00e-04"])
res = _apply_number_format(df=raw, number_format=nsf)
afe(exp, res)
def test_get_digits_after_decimal():
df = pd.DataFrame(
data=[["12.456", "0.00003", "1.23e+05"], ["16", "0.03", "1.2e+05"]]
).T
exp = 5
res = _get_digits_after_decimal(df)
assert exp == res
def test_create_group_to_col_position():
col_groups = [
"a_name",
"a_name",
"a_name",
"second_name",
"second_name",
"third_name",
]
exp = {"a_name": [0, 1, 2], "second_name": [3, 4], "third_name": [5]}
res = _create_group_to_col_position(col_groups)
assert exp == res
def test_get_model_names():
m1 = ProcessedModel(params=None, info=None, name="a_name")
m3 = ProcessedModel(params=None, info=None, name=None)
m5 = ProcessedModel(params=None, info=None, name="third_name")
models = [m1, m3, m5]
res = _get_model_names(models)
exp = ["a_name", "(2)", "third_name"]
assert res == exp
def test_get_default_column_names_and_groups():
model_names = ["a_name", "a_name", "(3)", "(4)", "third_name"]
res_names, res_groups = _get_default_column_names_and_groups(model_names)
exp_names = [f"({i+1})" for i in range(len(model_names))]
exp_groups = ["a_name", "a_name", "(3)", "(4)", "third_name"]
assert res_names == exp_names
assert res_groups == exp_groups
def test_get_default_column_names_and_groups_undefined_groups():
model_names = ["a_name", "second_name", "(3)", "(4)", "third_name"]
res_names, res_groups = _get_default_column_names_and_groups(model_names)
exp_names = model_names
assert res_names == exp_names
assert pd.isna(res_groups)
def test_customize_col_groups():
default = ["a_name", "a_name", "(3)", "(4)", "third_name"]
mapping = {"a_name": "first_name", "third_name": "fifth_name"}
exp = ["first_name", "first_name", "(3)", "(4)", "fifth_name"]
res = _customize_col_groups(default, mapping)
assert exp == res
def test_customize_col_names_dict():
default = list("abcde")
custom = {"a": "1", "c": "3", "e": "5"}
res = _customize_col_names(default_col_names=default, custom_col_names=custom)
exp = ["1", "b", "3", "d", "5"]
assert exp == res
def test_customize_col_names_list():
default = list("abcde")
custom = list("12345")
res = _customize_col_names(default_col_names=default, custom_col_names=custom)
exp = ["1", "2", "3", "4", "5"]
assert exp == res
def test_get_params_frames_with_common_index():
m1 = ProcessedModel(
params=pd.DataFrame(np.ones(5), index=list("abcde")), info=None, name=None
)
m2 = ProcessedModel(
params=pd.DataFrame(np.ones(3), index=list("abc")), info=None, name=None
)
res = _get_params_frames_with_common_index([m1, m2])
exp = [
pd.DataFrame(np.ones(5), index=list("abcde")),
pd.DataFrame(
np.concatenate([np.ones(3), np.ones(2) * np.nan]), index=list("abcde")
),
]
afe(res[0], exp[0])
afe(res[1], exp[1])
def test_get_params_frames_with_common_index_multiindex():
mi = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2), ("b", 3)])
m1 = ProcessedModel(params=pd.DataFrame(np.ones(5), index=mi), info=None, name=None)
m2 = ProcessedModel(
params=pd.DataFrame(np.ones(3), index=mi[:3]), info=None, name=None
)
res = _get_params_frames_with_common_index([m1, m2])
exp = [
pd.DataFrame(np.ones(5), index=mi),
pd.DataFrame(np.concatenate([np.ones(3), np.ones(2) * np.nan]), index=mi),
]
afe(res[0], exp[0])
| afe(res[1], exp[1]) | pandas.testing.assert_frame_equal |
import pandas as pd
import io
import requests
import json
import wbdata
class ProductoInternoBruto:
def __init__(self):
pass
def getPreciosCorrientesBase2004(self, periodo = "Anual"):
"""
El PIB es el valor total de bienes y servicios FINALES producidos en
un pais durante un periodo de tiempo determinado.
Este PBI se encuentra calculado a precios de mercado lo que significa
que incluye impuestos y subsidios.
Este PIB toma los precios de bienes y servicios del año que se esta
calculando ejemplo: el PIB Corriente el 2020 toma los precios del 2020
Este tipo de PBI no es bueno para comparar, si se lo divide por el
"Indice de Precios Implicitos" o "Deflactor del PBI" se obtiene el
PBIPreciosConstantes
Parameters
----------
periodo : str, optional (puede ser "Anual" o "Trimestral")
DESCRIPTION. The default is "Anual".
Returns
-------
pd.DataFrame()
"""
#Obtener la url de descarga del cvs
urlPackage="https://datos.gob.ar/api/3/action/package_show?id=sspm-producto-interno-bruto-precios-mercado-precios-corrientes-base-2004"
s=requests.get(urlPackage).content
objJson = json.loads(s)
resultado = objJson['result']['resources']
selector = 1 if periodo == 'Trimestral' else 0 #si no es trimestral siempre es anual
ultimoResultado = resultado[selector]
urlDescarga = ultimoResultado['url']
descripcion = ultimoResultado['description']
print("Descargando: {}".format(descripcion))
print("Archivo: {}".format(urlDescarga))
#Descargar la url con cvs y generar pandas dataframe
contenidoCVS = requests.get(urlDescarga).content
flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))
df_temp = pd.read_csv(flujoCVS)
#transform string to datetime
df_temp['indice_tiempo'] = | pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore') | pandas.to_datetime |
""" plotting functions for Dataset objects
To Do:
Edit hyp_stats plots to take transitions.HypStats object instead of ioeeg.Dataset object
Remove redundant plotting fns added into EKG classs
Add subsetEEG function to break up concatenated NREM segments for plotting. Will require adjustments
to specified detections added to plot.
"""
import itertools
import igraph as ig
import math
import matplotlib
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import shapely.geometry as SG
from matplotlib.widgets import Slider
from pandas.plotting import register_matplotlib_converters
from scipy.signal import find_peaks, butter, sosfiltfilt
from scipy import interpolate
register_matplotlib_converters()
def plotEEG(d, raw=True, filtered=False, spindles=False, spindle_rejects=False):
""" plot multichannel EEG w/ option for double panel raw & filtered. For short, pub-ready
figures. Use vizeeg for data inspection
red = spindle rejects by time domain criteria; dark red = spindle rejects by frequency domain criteria
Parameters
----------
d: instance of ioeeg Dataset class
raw: bool, optional, default: True
Option to plot raw EEG
filtered: bool, optional, default: False
Option to plot filtered EEG
spindles: bool, optional, default: False
Option to plot spindle detections
spindle_rejects: bool, optional, default: False
Option to plot rejected spindle detections
Returns
-------
matplotlib.pyplot figure instance
"""
data = []
title = []
# import data
if raw == True:
raw = d.data
data.append(raw)
title.append('Raw')
if filtered == True:
filtd = d.spindle_calcs.loc(axis=1)[:, 'Filtered']
data.append(filtd)
title.append('Filtered')
# flatten events list by channel for plotting
if spindles == True:
sp_eventsflat = [list(itertools.chain.from_iterable(d.spindle_events[i])) for i in d.spindle_events.keys()]
if spindle_rejects == True:
sp_rej_t_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_t[i])) for i in d.spindle_rejects_t.keys()]
sp_rej_f_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_f[i])) for i in d.spindle_rejects_f.keys()]
# set channels for plotting
channels = [x[0] for x in d.data.columns]
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=(10,10), squeeze=False)
fig.subplots_adjust(hspace=.1, top=.9, bottom=.1, left=.05, right=.95)
for dat, ax, t in zip(data, axs.flatten(), title):
for i, c in enumerate(channels):
# normalize each channel to [0, 1]
dat_ser = pd.Series(dat[(c, t)], index=dat.index)
norm_dat = (dat_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i # subtract i for plotting offset
ax.plot(norm_dat, linewidth=.5, color='C0')
# plot spindles
if spindles == True:
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat[i]]
spins = pd.Series(index=norm_dat.index)
spins[sp_events_TS] = norm_dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5)
if spindle_rejects == True:
# plot time-domain rejects
sp_rejs_t_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat[i]]
spin_rejects_t = pd.Series(index=norm_dat.index)
spin_rejects_t[sp_rejs_t_TS] = norm_dat[sp_rejs_t_TS]
ax.plot(spin_rejects_t, color='red', alpha=0.5)
# plot frequency-domain rejects
sp_rejs_f_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat[i]]
spin_rejects_f = pd.Series(index=norm_dat.index)
spin_rejects_f[sp_rejs_f_TS] = norm_dat[sp_rejs_f_TS]
ax.plot(spin_rejects_f, color='darkred', alpha=0.5)
ax.set_title(t)
ax.set_yticks(list(np.arange(0.5, -(len(channels)-1), -1)))
ax.set_yticklabels(channels)
ax.margins(x=0) # remove white space margins between data and y axis
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# set overall parameters
fig.suptitle(d.metadata['file_info']['in_num'])
plt.xlabel('Time')
return fig, axs
def plotEEG_singlechan(d, chan, raw=True, filtered=False, rms=False, thresholds=False, spindles=False, spindle_rejects=False):
""" plot single channel EEG. Options for multipaneled calculations. Not for concatenated datasets
Parameters
----------
d: instance of ioeeg Dataset class
chan: str
channel to plot
raw: bool, optional, default: True
Option to plot raw EEG panel
filtered: bool, optional, default: False
Option to plot filtered EEG panel
rms: bool, optional, default: False
Option to plot filtered EEG panel with RMS and RMS moving average
thresholds: bool, optional, default: False
Option to plot spindle threshold lines on rms panel
spindles: bool, optional, default: False
Option to plot filtered EEG with spindle detection panel
spindle_rejects: bool, optional, default: False
Option to plot filtered EEG with spindle rejection panel.
Note: Spindles and spindle_rejects plot on same panel if
both True
Returns
-------
matplotlib.pyplot figure instance
"""
data = []
dtype = []
labels = []
c = chan
# import data
if raw == True:
raw_data = d.data[c, 'Raw']
if filtered == True or rms == True or spindles == True or spindle_rejects == True:
filtd_data = d.spindle_calcs.loc(axis=1)[c, 'Filtered']
# set data to plot
if raw == True:
#raw = d.data[c, 'Raw']
data.append(raw_data)
dtype.append('raw')
labels.append('Raw Signal')
if filtered == True:
#filtd = d.spindle_calcs.loc(axis=1)[c, 'Filtered']
data.append(filtd_data)
dtype.append('filtd')
labels.append('Filtered Signal')
if rms == True:
data.append(filtd_data)
dtype.append('filtd+rms')
labels.append('Filtered Signal')
if spindles == True or spindle_rejects == True:
data.append(filtd_data)
labels.append('Filtered Signal')
if spindles == True and spindle_rejects == False:
dtype.append('filtd+spin')
elif spindles == False and spindle_rejects == True:
dtype.append('filtd+rej')
elif spindles == True and spindle_rejects == True:
dtype.append('filtd+spin+rej')
# pull out thresholds for labels
loSD = d.metadata['spindle_analysis']['sp_loSD']
hiSD = d.metadata['spindle_analysis']['sp_hiSD']
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=(18,6), squeeze=False)
fig.subplots_adjust(hspace=.1, top=.9, bottom=.1, left=.05, right=.95)
for dat, ax, dt, label in zip(data, axs.flatten(), dtype, labels):
# plot EEG
ax.plot(dat, linewidth=.5, color='C0', label=label)
# plot filtered EEG w/ rms & thresholds
if dt == 'filtd+rms':
ax.plot(d.spRMS[c], label='RMS', color='green')
ax.plot(d.spRMSmavg[c], label='RMS moving average', color='orange')
if dt == 'filtd+rms' and thresholds == True:
ax.axhline(d.spThresholds[c].loc['Low Threshold'], linestyle='solid', color='grey', label = f'Mean RMS + {loSD} SD')
ax.axhline(d.spThresholds[c].loc['High Threshold'], linestyle='dashed', color='grey', label = f'Mean RMS + {hiSD} SD')
# plot spindles
if dt =='filtd+spin' or dt =='filtd+spin+rej':
sp_valuesflat = []
sp_eventsflat = []
for n in range(len(d.spindle_events[c])):
for m in range(len(d.spindle_events[c][n])):
sp_valuesflat.append(dat[d.spindle_events[c][n][m]])
sp_eventsflat.append(d.spindle_events[c][n][m])
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat]
spins = pd.Series(index=dat.index)
spins[sp_events_TS] = dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5, label='Spindle Detection')
# plot spindle rejections
if dt == 'filtd+rej' or dt == 'filtd+spin+rej':
# plot time-domain rejects
sp_rej_t_valuesflat = []
sp_rej_t_eventsflat = []
for n in range(len(d.spindle_rejects_t[c])):
for m in range(len(d.spindle_rejects_t[c][n])):
sp_rej_t_valuesflat.append(dat[d.spindle_rejects_t[c][n][m]])
sp_rej_t_eventsflat.append(d.spindle_rejects_t[c][n][m])
sp_rej_t_events_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat]
spin_rejects_t = pd.Series(index=dat.index)
spin_rejects_t[sp_rej_t_events_TS] = dat[sp_rej_t_events_TS]
ax.plot(spin_rejects_t, color='red', alpha=0.5, label='Rejected Detection (T)')
# plot frequency-domain rejects
sp_rej_f_valuesflat = []
sp_rej_f_eventsflat = []
for n in range(len(d.spindle_rejects_f[c])):
for m in range(len(d.spindle_rejects_f[c][n])):
sp_rej_f_valuesflat.append(dat[d.spindle_rejects_f[c][n][m]])
sp_rej_f_eventsflat.append(d.spindle_rejects_f[c][n][m])
sp_rej_f_events_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat]
spin_rejects_f = pd.Series(index=dat.index)
spin_rejects_f[sp_rej_f_events_TS] = dat[sp_rej_f_events_TS]
ax.plot(spin_rejects_f, color='darkred', alpha=0.5, label='Rejected Detection (F)')
ax.legend(loc='lower left')
#ax.set_title(t)
#ax.set_yticks(list(np.arange(0.5, -(len(chan)-1), -1)))
#ax.set_yticklabels(chan)
ax.margins(x=0) # remove white space margins between data and y axis
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# plot minor axes
seconds = mdates.SecondLocator()
ax.xaxis.set_minor_locator(seconds)
ax.grid(axis='x', which='minor', linestyle=':')
ax.grid(axis='x', which='major')
# set overall parameters
fig.suptitle(d.metadata['file_info']['in_num'])
plt.xlabel('Time')
return fig
def vizeeg(d, raw=True, filtered=False, spindles=False, spindle_rejects=False, slider=True, win_width=15, raw_lowpass=True,
lowpass_freq=25, lowpass_order=4):
""" vizualize multichannel EEG w/ option for double panel raw and/or filtered. Optimized for
inspecting spindle detections (title/axis labels removed for space)
Spindles rejected based on time-domain criteria are plotted in red; rejections based on
frequency-domain criteria are plotted in darkred.
Parameters
----------
d: instance of ioeeg Dataset class
raw: bool, optional, default: True
Option to plot raw EEG
filtered: bool, optional, default: False
Option to plot spindle filtered EEG
spindles: bool, optional, default: False
Option to plot spindle detections
spindle_rejects: bool, optional, default: False
Option to plot rejected spindle detections
slider: bool (default: False)
Option to implement an X-axis slider instead of built-in matplotlib zoom. Useful
for inspecting long segments of EEG with a set window
win_width: int (default: 15)
If using slider option, number of seconds to set window width
raw_lowpass: bool (default: True)
Whether to plot the lowpass filtered raw data [in place of the unchanged raw data]
lowpass_freq: int (default: 25)
Frequency to lowpass the raw data for visualization (if not already applied)
lowpass_order: int (default: 4)
Butterworth lowpass filter order to be used if lowpass_raw is not None (doubles for filtfilt)
Returns
-------
matplotlib.pyplot figure instance
"""
# Set figure size (double height if plotting both raw & filtered)
if raw == True & filtered == True:
figsize = (14, 14)
else:
figsize = (14, 7)
data = []
title = []
# import data
if raw == True:
if not raw_lowpass:
# use the unchanged raw data
raw_data = d.data
elif raw_lowpass:
# use the lowpass filtered raw data
try:
# check if filtered data exists
raw_lowpass_data = d.data_lowpass
except AttributeError:
# apply lowpass filter
d.lowpass_raw(lowpass_freq, lowpass_order)
raw_lowpass_data = d.data_lowpass
if filtered == True:
filtd = d.spindle_calcs.loc(axis=1)[:, 'Filtered']
# set data to plot (title corresponds to multiindex level 2 in data df)
if raw == True:
if not raw_lowpass:
# plot the unchanged data
data.append(raw_data)
title.append('Raw')
elif raw_lowpass:
# plot the lowpass data
data.append(raw_lowpass_data)
title.append('raw_lowpass')
if filtered == True:
data.append(filtd)
title.append('Filtered')
# flatten events list by channel for plotting
if spindles == True:
sp_eventsflat = [list(itertools.chain.from_iterable(d.spindle_events[i])) for i in d.spindle_events.keys()]
if spindle_rejects == True:
# time-domain rejects
sp_rej_t_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_t[i])) for i in d.spindle_rejects_t.keys()]
# frequency domain rejects
sp_rej_f_eventsflat = [list(itertools.chain.from_iterable(d.spindle_rejects_f[i])) for i in d.spindle_rejects_f.keys()]
# set channels for plotting
channels = [x[0] for x in d.data.columns if x[0] not in ['EKG', 'EOG_L', 'EOG_R']]
# set offset multiplier (distance between channels in plot)
mx = 0.1
# plot data
fig, axs = plt.subplots(len(data), 1, sharex=True, figsize=figsize, squeeze=False)
fig.subplots_adjust(hspace=.1, top=.9, bottom=.1, left=.05, right=.95)
yticks = []
for dat, ax, t in zip(data, axs.flatten(), title):
for i, c in enumerate(channels):
# normalize each channel to [0, 1] -> can also simply subtract the mean (cleaner looking), but
# normalization preserves relative differences between channels while putting them on a common scale
dat_ser = pd.Series(dat[(c, t)], index=dat.index)
norm_dat = (dat_ser - min(dat_ser))/(max(dat_ser)-min(dat_ser)) - i*mx # subtract i for plotting offset
yticks.append(np.nanmedian(norm_dat))
ax.plot(norm_dat, linewidth=.5, color='C0')
# plot spindles
if spindles == True:
sp_events_TS = [pd.Timestamp(x) for x in sp_eventsflat[i]]
spins = pd.Series(index=norm_dat.index)
spins[sp_events_TS] = norm_dat[sp_events_TS]
ax.plot(spins, color='orange', alpha=0.5)
if spindle_rejects == True:
# plot time-domain rejects
sp_rejs_t_TS = [pd.Timestamp(x) for x in sp_rej_t_eventsflat[i]]
spin_t_rejects = pd.Series(index=norm_dat.index)
spin_t_rejects[sp_rejs_t_TS] = norm_dat[sp_rejs_t_TS]
ax.plot(spin_t_rejects, color='red', alpha=0.5)
# plot frequency-domain rejects
sp_rejs_f_TS = [pd.Timestamp(x) for x in sp_rej_f_eventsflat[i]]
spin_f_rejects = | pd.Series(index=norm_dat.index) | pandas.Series |
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
#AppAutomater.py has App graphs and data
#Graphs.py has all graphs
#Data.py has all data processing stuff
#Downloader.py is used to download files daily
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
#from apscheduler.schedulers.background import BackgroundScheduler
#import atexit
import plotly.express as px
import json
import numpy as np
import pandas as pd
from pymongo import MongoClient
#Scheduler to update data
###########################################################################
###########################################################################
def g():
#client =
#db =
#collection =
#Read Only Needed Data
###########################################################################
###########################################################################
grouped_daily_cities = collection.find_one({"index":"grouped_daily_cities"})
grouped_daily_cities = pd.DataFrame(grouped_daily_cities["data"])
grouped_cumulative_cities = collection.find_one({"index":"grouped_cumulative_cities"})
grouped_cumulative_cities = pd.DataFrame(grouped_cumulative_cities["data"])
g.grouped_daily_weekly = collection.find_one({"index":"grouped_daily_weekly"})
g.grouped_daily_weekly = pd.DataFrame(g.grouped_daily_weekly["data"])
df = collection.find_one({"index":"df"})
df = pd.DataFrame(df["data"])
# df=pd.read_csv('Data/df.csv')
df_Total = collection.find_one({"index":"df_Total"})
df_Total = pd.DataFrame(df_Total["data"])
# df_Total=pd.read_csv('Data/Total.csv')
g.grouped_daily_regions = collection.find_one({"index":"grouped_daily_regions"})
g.grouped_daily_regions = | pd.DataFrame(g.grouped_daily_regions["data"]) | pandas.DataFrame |
#!/usr/bin/env python
__author__ = '<NAME>'
import argparse
import multiprocessing as mp
from collections import OrderedDict
import pandas as pd
from RouToolPa.Parsers.Sequence import CollectionSequence
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file_list", action="store", dest="input_file_list", required=True,
type=lambda s: s.split(","),
help="Comma-separated list of files with different assemblies")
parser.add_argument("-l", "--labels_list", action="store", dest="labels_list",
type=lambda s: s.split(","),
help="Comma-separated list of assembly labels. Should have same length as list of "
"input files with assemblies. Default - not set, assemblies will be named like A1, A2, ../ ")
parser.add_argument("-e", "--thresholds", action="store", dest="thresholds", default=[0, 100, 250, 500, 1000],
type=lambda s: map(int, s.split(",")),
help="Comma-separated list of thresholds for N50 calculations. "
"Default: 0,100,250,500,1000")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-f", "--format", action="store", dest="format", default="fasta",
help="Format of input files")
args = parser.parse_args()
if args.labels_list is not None:
if len(args.labels_list) != len(args.input_file_list):
raise ValueError("Length of labels list is not equal to number of files with assemblies")
assemblies_dict = OrderedDict()
stats_dict = OrderedDict({"N50": | pd.DataFrame() | pandas.DataFrame |
import argparse
import mplfinance as mpf
import numba as nb
import os
import pandas as pd
from pandas_datareader import data, wb
from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.frequencies import to_offset
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from datetime import datetime, timedelta
import pytz
import sys
# %matplotlib inline
plt.rcParams['figure.figsize'] = [15, 15]
plt.style.use('ggplot')
# plt.style.use('seaborn')
from matplotlib.ticker import Formatter
class WeekdayDateFormatter(Formatter):
# https://matplotlib.org/gallery/ticks_and_spines/date_index_formatter.html
# the data is first plotted against an integer. The formatter changes the integer to the correct date.
def __init__(self, dates, fmt='%Y-%m-%d'):
self.dates = dates
self.fmt = fmt
def __call__(self, x, pos=0):
'Return the label for time x at position pos'
ind = int(round(x))
if ind >= len(self.dates) or ind < 0:
return ''
return (self.dates[ind]).strftime(self.fmt)
def search_stock_symbols(stock):
fn = 'symbols.csv'
if not os.path.exists(fn):
symbols = get_nasdaq_symbols()
symbols.to_csv(fn, index='Symbol')
else:
symbols = pd.read_csv(fn, index_col='Symbol')
if stock is None:
return symbols
stock = stock.upper()
hard_search = symbols[symbols['NASDAQ Symbol'] == stock]
if len(hard_search) == 1:
return 1, symbols[symbols['NASDAQ Symbol'] == stock]['Security Name'][stock]
else:
found = symbols[symbols['NASDAQ Symbol'].str.contains(stock)]
if found.empty:
return 0, None
else:
return len(found), found
def valid_time(arg):
try:
return datetime.strptime(arg, "%H:%M")
except ValueError:
msg = "Not a valid time: '{0}'.".format(arg)
raise argparse.ArgumentTypeError(msg)
def valid_date(arg):
try:
dt = datetime.strptime(arg, "%m/%d/%Y")
except ValueError:
msg = 'Not a valid date: "{0}".'.format(arg)
raise argparse.ArgumentTypeError(msg)
if dt.date() > datetime.now().date():
dt = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
msg = f'''A future date is not valid: "{arg}". Instead using "{dt.date().strftime('%m/%d/%Y')}"'''
print(msg)
return dt
def cli_parameters():
parser = argparse.ArgumentParser()
parser.add_argument('stock', metavar='N', type=str, nargs='*', help='create reports for all stocks entered')
parser.add_argument('--compare', action='store_true', default=False, help='compare the list of stocks')
parser.add_argument('--bb', action='store_true', default=False, help='show Bollinger Bands on stock chart')
parser.add_argument('--macd', action='store_true', default=False, help='show Moving Average Convergence/Divergence on separate chart')
parser.add_argument('--sto', action='store_true', default=False, help='show Stochastic on separate chart')
parser.add_argument('--rsi', action='store_true', default=False, help='show Relative Srength Index on separate chart')
parser.add_argument('--cmf', action='store_true', default=False, help='show Chaikin Money Flow on separate chart')
parser.add_argument('--best', action='store_true', default=False, help='show BB, MACD, and RSI')
parser.add_argument('--save', action='store_true', default=False, help='Save plot to disk')
parser.add_argument('--show', action='store_true', default=False, help='Show interactive plot')
parser.add_argument('--weekly', action='store_true', default=False, help='Resample data into weekly charts')
parser.add_argument("--startdate", help="Start date - format MM/DD/YYYY",
default=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=547), type=valid_date)
parser.add_argument("--enddate", help="End date - format MM/DD/YYYY",
default=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0), type=valid_date)
parser.add_argument("--priceline", help="Insert a horizontal black line on plot at price (float)", type=float)
parser.add_argument("--dateline", help="Insert a vertical black line on plot - format MM/DD/YYYY", type=valid_date)
parser.add_argument('--daydelta', type=int, help='Days between start date and end date.')
parser.add_argument('--zoom', type=int, default=0, help='Zoom into the plot for the last number of days.')
args = parser.parse_args()
args.stock = sorted([i.upper() for i in args.stock])
if not args.save:
args.show = True
# if len(args.stock) > 1:
# args.save = True
# args.show = False
if args.startdate > args.enddate:
parser.error(f'Start date "{args.startdate}" can not be greater than End Date "{args.enddate}"')
if args.daydelta:
args.startdate = args.enddate - timedelta(days=args.daydelta)
if args.best:
args.bb = True
args.macd = True
args.rsi = True
# log_message(parser.parse_args().__str__())
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return args
def remove_holidays_and_weekends(start, end, move_date_forward=True):
holidays = USFederalHolidayCalendar().holidays(start=start - timedelta(days=14), end=end + timedelta(days=14)).to_pydatetime()
if move_date_forward:
dt = start
else:
dt = end
while dt in holidays or dt.weekday() >= 5:
if move_date_forward:
dt += timedelta(days=1)
else:
dt -= timedelta(days=1)
return dt
class StockAnalysis:
def __init__(self, stock: str, start: datetime, end: datetime,
sma: list=[200, 100, 50, 5], close_col: str="Close", plot_type: str="line",
weekly: bool=False, priceline: float=None, dateline: datetime=None):
"""
Gather data for the stock between the given dates.
SMA: list of simple moving average days to plot
CLOSE_COL: column name to use for close data. Usually Close or Adj Close
PLOT_TYPE: how to plot the data. line or candlestick
"""
self.stock = stock
self.stock_name = None
self.stock_count = self.confirm_stock_symbol()
if self.stock_count > 1:sys.exit(1)
self.start = start
self.end = end
self.close_col = close_col
self.sma = sma
self.ema = sma + [20]
self.plot_type = plot_type
self.weekly = weekly
self.priceline = priceline
self.dateline = dateline
self.df = self.get_data_frame(self.stock, self.start,
self.end, weekly=self.weekly)
self.set_day_color()
self.simple_moving_average()
self.exponential_moving_average()
def confirm_stock_symbol(self):
count, name = search_stock_symbols(self.stock)
if count == 0:
print(f'Symbol {self.stock} is not traded on the Nasdaq exchange')
elif count == 1:
self.stock_name = name
else:
print(f'Multiple stock symbols found for {self.stock}')
return count
def store_stock(self, stock: str, start: datetime, end: datetime, filename: str):
print(f"Pulling stock data for {stock} from {start} to {end}")
try:
# df = pd.DataFrame(data.DataReader(stock,'yahoo',start,end))
df = pd.DataFrame(data.DataReader(stock,'stooq',start,end))
except KeyError:
print("Stock out of range")
df = pd.DataFrame()
df = df.reset_index()
if os.path.exists(filename):
df_existing = pd.read_csv(filename, parse_dates=['Date'])
df = df_existing.append(df).reset_index(drop=True)
df = df.sort_values('Date')
if df.empty and self.stock_name == 0:
print(f"No data found for {self.stock}")
sys.exit(1)
# sometimes data is returned with two rows for the same date. The last row is the row to keep.
df = df[~df.Date.duplicated(keep='last')]
df_store = df.copy()
market_close = datetime.now().replace(hour=15, minute=5, second=0, microsecond=0)
# market_close = datetime.now().replace(hour=23, minute=59, second=59, microsecond=999999)
if (df.Date.sort_values().iloc[-1].date() == datetime.today().date()) and (datetime.now() < market_close):
# The market has not closed today so do not store today's data in csv.
df_store.drop(df_store.tail(1).index,inplace=True)
df_store.to_csv(filename, index=False)
return df
def get_data_frame(self, stock: str, start: datetime, end: datetime, get_most_recent_data: bool = True, weekly: bool = False):
"""
:stock: text stock ticker
:start: date to start stock data in format "MM-DD-YYYY" or python datetime
:end: date to end stock data in format "MM-DD-YYYY" or python datetime
# :get_most_recent_data: update stored data to have recent close data
"""
if '-usd' in stock.lower():
start_dt = start
end_dt = end
else:
# do not remove days when dealing with a currency like ETH-USD
start_dt = remove_holidays_and_weekends(start, end, move_date_forward=True)
end_dt = remove_holidays_and_weekends(start, end, move_date_forward=False)
filename = f"data/{stock}.csv"
if os.path.exists(filename):
df = pd.read_csv(filename, parse_dates=['Date'])
if start_dt >= df.Date.min() and end_dt <= df.Date.max():
print(f"Using Stored Stock Data for {stock} from {start_dt.date()} to {end_dt.date()}")
if end_dt > df.Date.max():
interim_dt = remove_holidays_and_weekends(df.Date.max() + | pd.Timedelta("1d") | pandas.Timedelta |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.lines import Line2D
from sklearn.metrics import confusion_matrix
import seaborn as sn
import sys
import re
import csv
from itertools import chain
def visualizeData(file, compfile, source, class_):
if(source == 'reddit' or source == 'combined'):
labels = ['Spelling Delta', 'Caps', 'Text length', 'Word length', 'Punctuation', 'Adjective', 'Interjection', 'Common Noun', 'Proper Noun', 'Verb', 'Pronoun', 'Adverb']
features = ['spellDelta', 'caps', 'textLength', 'sentenceWordLength', ',', 'A', '!', 'N', '^', 'V', 'O', 'R', class_]
else:
labels = ['Elongation', 'Caps', 'Text length', 'Word length', 'Punctuation', 'Adjective', 'Interjection', 'Common Noun', 'Proper Noun', 'Verb', 'Pronoun', 'Adverb']
features = ['elongated', 'caps', 'textLength', 'sentenceWordLength', ',', 'A', '!', 'N', '^', 'V', 'O', 'R', class_]
data = pd.read_csv(file, header=0, sep=',')
comparison = pd.read_csv(compfile, header=0, sep=',')
featuredata = data[features]
comparisondata = comparison[features]
# ax1.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[0]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[0]]):
# ax1.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax1.set_title(labels[0], fontdict={'size':12})
# ax1.set(ylabel='Average amount per text')
# ax1.set_ylim(0, 0.18)
# ax1.axis('square')
# #ax1.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[1]])
# ax2.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[1]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[1]]):
# ax2.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax2.set_title(labels[1], fontdict={'size':12})
# ax2.set_ylim(0, 0.8)
# ax2.axis('scaled')
# #ax2.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[2]])
# ax3.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[2]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[2]]):
# ax3.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax3.set_title(labels[2], fontdict={'size':12})
# ax3.set_ylim(0, 18)
# #ax3.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[3]])
# ax4.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[3]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[3]]):
# ax4.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax4.set_title(labels[3], fontdict={'size':12})
# ax4.set_ylim(0, 5)
# #ax4.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[4]])
# ax5.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[4]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[4]]):
# ax5.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax5.set_title(labels[4], fontdict={'size':12})
# ax5.set(ylabel='Percentage of words in text')
# ax5.set_ylim(0, 55)
# #ax5.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[5]])
# ax6.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[5]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[5]]):
# ax6.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax6.set_title(labels[5], fontdict={'size':12})
# ax6.set_ylim(0, 15)
# #ax6.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[6]])
# ax7.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[6]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[6]]):
# ax7.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax7.set_title(labels[6], fontdict={'size':12})
# ax7.set_ylim(0, 8)
# #ax7.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[7]])
# ax8.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[7]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[7]]):
# ax8.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax8.set_title(labels[7], fontdict={'size':12})
# ax8.set_ylim(0, 65)
# #ax8.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[8]])
# ax9.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[8]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[8]]):
# ax9.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax9.set_title(labels[8], fontdict={'size':12})
# ax9.set(ylabel='Percentage of words in text')
# ax9.set_ylim(0,25)
# plt.xticks(featuredata.index, featuredata[class_], rotation=50, horizontalalignment='center', fontsize=6)
# #ax9.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[9]])
# ax10.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[9]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[9]]):
# ax10.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax10.set_title(labels[9], fontdict={'size':12})
# #plt.xticks(featuredata.index, featuredata[class_], rotation=50, horizontalalignment='center', fontsize=6)
# ax10.set_ylim(0, 37)
# #ax10.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[10]])
# ax11.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[10]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[10]]):
# ax11.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax11.set_title(labels[10], fontdict={'size':12})
# #plt.xticks(featuredata.index, featuredata[class_], rotation=50, horizontalalignment='center', fontsize=6)
# ax11.set_ylim(0, 15)
# #ax11.set_xticks(featuredata.index, featuredata[class_], rotation=60, horizontalalignment='right', fontsize=12)
# print(featuredata[features[11]])
# ax12.vlines(x=featuredata[class_], ymin=0, ymax=featuredata[features[11]], color='firebrick', alpha=0.7, linewidth=10)
# for i, cty in enumerate(featuredata[features[11]]):
# ax12.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
# ax12.set_title(labels[11], fontdict={'size':12})
# ax12.set_ylim(0, 9);
# plt.xticks(featuredata.index, featuredata[class_], rotation=50, horizontalalignment='center', fontsize=6)
#ax1.errorbar(featuredata[features[0]], data[class_], xerr=stddata[features[0]],fmt='+', solid_capstyle='projecting', capsize=5)
if(class_ == 'lang'):
if(source == 'twitter'):
scale = 0.1
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(10,7))
print(featuredata[features[0]])
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='Twitter', markersize=4)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax1.xaxis.set_ticks(np.arange(0, featuredata[features[0]].max() + 0.1, 0.1))
ax1.set_xlim(0, 0.22);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
print(featuredata[features[1]])
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='Twitter', markersize=4)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax2.xaxis.set_ticks(np.arange(0, featuredata[features[1]].max() + 0.2, 0.1))
ax2.set_xlim(0.20, 0.72);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]])
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='Twitter', markersize=4)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax3.xaxis.set_ticks(np.arange(0, featuredata[features[2]].max() + 10, 2))
ax3.set_xlim(4.4, 18.5);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]])
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='Twitter', markersize=4)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax4.xaxis.set_ticks(np.arange(0, featuredata[features[3]].max() + 0.2, 0.2))
ax4.set_xlim(3.8, 4.92);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]])
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='Twitter', markersize=4)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax5.xaxis.set_ticks(np.arange(0, featuredata[features[4]].max() + 4, 6))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(13, 55);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]])
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='Twitter', markersize=4)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax6.xaxis.set_ticks(np.arange(0, featuredata[features[5]].max() + 1, 2))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax6.set_xlim(6, 15);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]])
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='Twitter', markersize=4)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax7.xaxis.set_ticks(np.arange(0, featuredata[features[6]].max() + 0.5, 1))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax7.set_xlim(0, 8);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]])
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='Twitter', markersize=4)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax8.xaxis.set_ticks(np.arange(0, featuredata[features[7]].max() + 3, 6))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax8.set_xlim(25, 66);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]])
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='Twitter', markersize=4)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax9.xaxis.set_ticks(np.arange(0, featuredata[features[8]].max() + 4, 3))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(7, 25);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]])
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='Twitter', markersize=4)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax10.xaxis.set_ticks(np.arange(0, featuredata[features[9]].max() + 3, 5))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax10.set_xlim(14, 36.5);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]])
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='Twitter', markersize=4)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax11.xaxis.set_ticks(np.arange(0, featuredata[features[10]].max() + 6, 3))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax11.set_xlim(0, 15);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]])
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='Twitter', markersize=4)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax12.xaxis.set_ticks(np.arange(0, featuredata[features[11]].max() + 1, 1))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax12.set_xlim(2, 8);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
#ax.tick_params(axis='y', labelsize=7)
elif(source == 'reddit'):
scale = 0.1
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(10,10))
print(featuredata[features[0]])
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='European', markersize=3)
ax1.plot(comparisondata[features[0]], comparisondata[class_], 'rx', label='non-European', markersize=3)
#ax1.hlines(y=data[class_], xmin=0, xmax=featuredata[features[0]], color='firebrick', alpha=0.7, linewidth=10)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[0]]):
if(y == comparisondata[features[0]].max() or y == comparisondata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax1.xaxis.set_ticks(np.arange(0, featuredata[features[0]].max() + 0.1, 0.2))
ax1.set_xlim(1.6, 2.85);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
#ax1.legend(loc='upper center', bbox_to_anchor=(0.5,-0.1), prop={'size': 6})
ax1.legend(loc='lower center', bbox_to_anchor=(-0.6,0.9), prop={'size': 6})
print(featuredata[features[1]])
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='European', markersize=3)
ax2.plot(comparisondata[features[1]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[1]]):
if(y == comparisondata[features[1]].max() or y == comparisondata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax2.xaxis.set_ticks(np.arange(0, featuredata[features[1]].max() + 0.2, 0.3))
ax2.set_xlim(0.08, 1.8);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]])
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='European', markersize=3)
ax3.plot(comparisondata[features[2]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[2]]):
if(y == comparisondata[features[2]].max() or y == comparisondata[features[2]].min()):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax3.xaxis.set_ticks(np.arange(0, featuredata[features[2]].max() + 10, 6))
ax3.set_xlim(11, 70);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]])
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='European', markersize=3)
ax4.plot(comparisondata[features[3]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[3]]):
if(y == comparisondata[features[3]].max() or y == comparisondata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax4.xaxis.set_ticks(np.arange(0, featuredata[features[3]].max() + 0.2, 0.2))
ax4.set_xlim(3.9, 5.1);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]])
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='European', markersize=3)
ax5.plot(comparisondata[features[4]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
if(x == 0):
ax5.text(y+0.3, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[4]]):
if(y == comparisondata[features[4]].max() or y == comparisondata[features[4]].min()):
if(x == 0):
ax5.text(y-0.3, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax5.xaxis.set_ticks(np.arange(0, featuredata[features[4]].max() + 4, 3))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(15.7, 32);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]])
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='European', markersize=3)
ax6.plot(comparisondata[features[5]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[5]]):
if(y == comparisondata[features[5]].max() or y == comparisondata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax6.xaxis.set_ticks(np.arange(0, featuredata[features[5]].max() + 1, 1))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax6.set_xlim(4.5, 8.8);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]])
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='European', markersize=3)
ax7.plot(comparisondata[features[6]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[6]]):
if(y == comparisondata[features[6]].max() or y == comparisondata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax7.xaxis.set_ticks(np.arange(0, featuredata[features[6]].max() + 0.5, 0.5))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax7.set_xlim(1.9, 5);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]])
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='European', markersize=3)
ax8.plot(comparisondata[features[7]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[7]]):
if(y == comparisondata[features[7]].max() or y == comparisondata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax8.xaxis.set_ticks(np.arange(0, featuredata[features[7]].max() + 3, 1))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax8.set_xlim(16, 22.5);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]])
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='European', markersize=3)
ax9.plot(comparisondata[features[8]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[8]]):
if(y == comparisondata[features[8]].max() or y == comparisondata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax9.xaxis.set_ticks(np.arange(0, featuredata[features[8]].max() + 4, 2))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(15, 26.5);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]])
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='European', markersize=3)
ax10.plot(comparisondata[features[9]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[9]]):
if(y == comparisondata[features[9]].max() or y == comparisondata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax10.xaxis.set_ticks(np.arange(0, featuredata[features[9]].max() + 3, 1))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax10.set_xlim(10, 17);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]])
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='European', markersize=3)
ax11.plot(comparisondata[features[10]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[10]]):
if(y == comparisondata[features[10]].max() or y == comparisondata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax11.xaxis.set_ticks(np.arange(0, featuredata[features[10]].max() + 6, 1))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax11.set_xlim(2, 8);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]])
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='European', markersize=3)
ax12.plot(comparisondata[features[11]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[11]]):
if(y == comparisondata[features[11]].max() or y == comparisondata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax12.xaxis.set_ticks(np.arange(0, featuredata[features[11]].max() + 1, 0.5))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax12.set_xlim(2.5, 4.1);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
ax.tick_params(axis='y', labelsize=7)
else:
scale = 0.1
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(10,10))
print(featuredata[features[0]].min(), featuredata[features[0]].max(), comparisondata[features[0]].min(), comparisondata[features[0]].max())
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='European', markersize=3)
ax1.plot(comparisondata[features[0]], comparisondata[class_], 'rx', label='non-European', markersize=3)
#ax1.hlines(y=data[class_], xmin=0, xmax=featuredata[features[0]], color='firebrick', alpha=0.7, linewidth=10)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[0]]):
if(y == comparisondata[features[0]].max() or y == comparisondata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax1.xaxis.set_ticks(np.arange(0, 2.8, 0.2))
ax1.set_xlim(1.15, 2.7);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
#ax1.legend(loc='upper center', bbox_to_anchor=(0.5,-0.1), prop={'size': 6})
ax1.legend(loc='lower center', bbox_to_anchor=(-0.6,0.9), prop={'size': 6})
print(featuredata[features[1]].min(), featuredata[features[1]].max(), comparisondata[features[1]].min(), comparisondata[features[1]].max())
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='European', markersize=3)
ax2.plot(comparisondata[features[1]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[1]]):
if(y == comparisondata[features[1]].max() or y == comparisondata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax2.xaxis.set_ticks(np.arange(0, 1.8, 0.3))
ax2.set_xlim(0.08, 1.7);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]].min(), featuredata[features[2]].max(), comparisondata[features[2]].min(), comparisondata[features[2]].max())
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='European', markersize=3)
ax3.plot(comparisondata[features[2]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[2]]):
if(y == comparisondata[features[2]].max() or y == comparisondata[features[2]].min()):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax3.xaxis.set_ticks(np.arange(0, 70, 6))
ax3.set_xlim(2, 70);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]].min(), featuredata[features[3]].max(), comparisondata[features[3]].min(), comparisondata[features[3]].max())
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='European', markersize=3)
ax4.plot(comparisondata[features[3]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[3]]):
if(y == comparisondata[features[3]].max() or y == comparisondata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax4.xaxis.set_ticks(np.arange(0, featuredata[features[3]].max() + 0.2, 0.2))
ax4.set_xlim(3.9, 5.1);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]].min(), featuredata[features[4]].max(), comparisondata[features[4]].min(), comparisondata[features[4]].max())
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='European', markersize=3)
ax5.plot(comparisondata[features[4]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
#if(x == 0):
# ax5.text(y+0.3, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
#else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[4]]):
if(y == comparisondata[features[4]].max() or y == comparisondata[features[4]].min()):
#if(x == 0):
# ax5.text(y-0.3, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
#else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax5.xaxis.set_ticks(np.arange(0, 60, 8))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(13, 56);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]].min(), featuredata[features[5]].max(), comparisondata[features[5]].min(), comparisondata[features[5]].max())
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='European', markersize=3)
ax6.plot(comparisondata[features[5]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[5]]):
if(y == comparisondata[features[5]].max() or y == comparisondata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax6.xaxis.set_ticks(np.arange(0, 15, 2))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax6.set_xlim(4, 14);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]].min(), featuredata[features[6]].max(), comparisondata[features[6]].min(), comparisondata[features[6]].max())
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='European', markersize=3)
ax7.plot(comparisondata[features[6]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[6]]):
if(y == comparisondata[features[6]].max() or y == comparisondata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax7.xaxis.set_ticks(np.arange(0, 10, 1))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax7.set_xlim(0, 8);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]].min(), featuredata[features[7]].max(), comparisondata[features[7]].min(), comparisondata[features[7]].max())
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='European', markersize=3)
ax8.plot(comparisondata[features[7]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[7]]):
if(y == comparisondata[features[7]].max() or y == comparisondata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax8.xaxis.set_ticks(np.arange(0, 55, 5))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax8.set_xlim(14, 52);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]].min(), featuredata[features[8]].max(), comparisondata[features[8]].min(), comparisondata[features[8]].max())
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='European', markersize=3)
ax9.plot(comparisondata[features[8]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[8]]):
if(y == comparisondata[features[8]].max() or y == comparisondata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax9.xaxis.set_ticks(np.arange(0, 30, 3))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(10, 26.5);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]].min(), featuredata[features[9]].max(), comparisondata[features[9]].min(), comparisondata[features[9]].max())
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='European', markersize=3)
ax10.plot(comparisondata[features[9]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[9]]):
if(y == comparisondata[features[9]].max() or y == comparisondata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax10.xaxis.set_ticks(np.arange(0, 40, 4))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax10.set_xlim(8, 32);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]].min(), featuredata[features[10]].max(), comparisondata[features[10]].min(), comparisondata[features[10]].max())
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='European', markersize=3)
ax11.plot(comparisondata[features[10]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[10]]):
if(y == comparisondata[features[10]].max() or y == comparisondata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax11.xaxis.set_ticks(np.arange(0, 10, 1))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax11.set_xlim(1.5, 8);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]].min(), featuredata[features[11]].max(), comparisondata[features[11]].min(), comparisondata[features[11]].max())
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='European', markersize=3)
ax12.plot(comparisondata[features[11]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[11]]):
if(y == comparisondata[features[11]].max() or y == comparisondata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax12.xaxis.set_ticks(np.arange(0, 7, 1))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax12.set_xlim(2, 6);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
ax.tick_params(axis='y', labelsize=7)
elif(class_ == 'langFam'):
if(source == 'twitter'):
print('obsolete')
else:
scale = 0
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(10,6))
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='European', markersize=3)
ax1.plot(comparisondata[features[0]], comparisondata[class_], 'rx', label='non-European', markersize=3)
#ax1.hlines(y=data[class_], xmin=0, xmax=featuredata[features[0]], color='firebrick', alpha=0.7, linewidth=10)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
if(x == 2):
continue
else:
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[0]]):
if(y == comparisondata[features[0]].max() or y == comparisondata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax1.xaxis.set_ticks(np.arange(0, featuredata[features[0]].max() + 0.1, 0.1))
ax1.set_xlim(1.85, 2.3);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
#ax1.legend(loc='upper center', bbox_to_anchor=(0.5,-0.2), prop={'size': 6})
ax1.legend(loc='lower center', bbox_to_anchor=(-0.6,0.9), prop={'size': 6})
print(featuredata[features[1]])
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='European', markersize=3)
ax2.plot(comparisondata[features[1]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[1]]):
if(y == comparisondata[features[1]].max() or y == comparisondata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax2.xaxis.set_ticks(np.arange(0, featuredata[features[1]].max() + 0.2, 0.1))
ax2.set_xlim(0.28, 0.62);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]])
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='European', markersize=3)
ax3.plot(comparisondata[features[2]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
if(x == 2):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[2]]):
if(y == comparisondata[features[2]].max() or y == comparisondata[features[2]].min()):
if(x == 2):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax3.xaxis.set_ticks(np.arange(0, featuredata[features[2]].max() + 10, 4))
ax3.set_xlim(19.5, 38);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]])
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='European', markersize=3)
ax4.plot(comparisondata[features[3]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[3]]):
if(y == comparisondata[features[3]].max() or y == comparisondata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax4.xaxis.set_ticks(np.arange(0, featuredata[features[3]].max() + 0.2, 0.1))
ax4.set_xlim(4.25, 4.82);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]])
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='European', markersize=3)
ax5.plot(comparisondata[features[4]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
if(x == 2):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[4]]):
if(y == comparisondata[features[4]].max() or y == comparisondata[features[4]].min()):
if(x == 2):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax5.xaxis.set_ticks(np.arange(0, featuredata[features[4]].max() + 4, 1))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(20.5, 26.5);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]])
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='European', markersize=3)
ax6.plot(comparisondata[features[5]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[5]]):
if(y == comparisondata[features[5]].max() or y == comparisondata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax6.xaxis.set_ticks(np.arange(0, featuredata[features[5]].max() + 1, 0.5))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax6.set_xlim(6, 8.6);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]])
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='European', markersize=3)
ax7.plot(comparisondata[features[6]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[6]]):
if(y == comparisondata[features[6]].max() or y == comparisondata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax7.xaxis.set_ticks(np.arange(0, featuredata[features[6]].max() + 0.5, 0.2))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax7.set_xlim(3.2, 4.1);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]])
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='European', markersize=3)
ax8.plot(comparisondata[features[7]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[7]]):
if(y == comparisondata[features[7]].max() or y == comparisondata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax8.xaxis.set_ticks(np.arange(0, featuredata[features[7]].max() + 3, 0.5))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax8.set_xlim(17.6, 20.3);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]])
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='European', markersize=3)
ax9.plot(comparisondata[features[8]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[8]]):
if(y == comparisondata[features[8]].max() or y == comparisondata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax9.xaxis.set_ticks(np.arange(0, featuredata[features[8]].max() + 4, 1))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(19.8, 24);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]])
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='European', markersize=3)
ax10.plot(comparisondata[features[9]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[9]]):
if(y == comparisondata[features[9]].max() or y == comparisondata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax10.xaxis.set_ticks(np.arange(0, featuredata[features[9]].max() + 3, 0.5))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax10.set_xlim(11.8, 14);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]])
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='European', markersize=3)
ax11.plot(comparisondata[features[10]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[10]]):
if(y == comparisondata[features[10]].max() or y == comparisondata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax11.xaxis.set_ticks(np.arange(0, featuredata[features[10]].max() + 6, 1))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax11.set_xlim(4, 6.5);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]])
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='European', markersize=3)
ax12.plot(comparisondata[features[11]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[11]]):
if(y == comparisondata[features[11]].max() or y == comparisondata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax12.xaxis.set_ticks(np.arange(0, featuredata[features[11]].max() + 1, 0.2))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax12.set_xlim(3.1, 3.9);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
#ax.tick_params(axis='y', labelsize=7)
elif(class_ == 'origin'):
if(source == 'twitter'):
scale = 0
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(8,6))
print(featuredata[features[0]])
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='Twitter', markersize=4)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax1.xaxis.set_ticks(np.arange(0, featuredata[features[0]].max() + 0.1, 0.02))
ax1.set_xlim(0.1, 0.18);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
print(featuredata[features[1]])
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='Twitter', markersize=4)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax2.xaxis.set_ticks(np.arange(0, featuredata[features[1]].max() + 0.2, 0.1))
ax2.set_xlim(0.3, 0.62);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]])
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='Twitter', markersize=4)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax3.xaxis.set_ticks(np.arange(0, featuredata[features[2]].max() + 10, 2))
ax3.set_xlim(11, 15);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]])
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='Twitter', markersize=4)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax4.xaxis.set_ticks(np.arange(0, featuredata[features[3]].max() + 0.2, 0.05))
ax4.set_xlim(4.55, 4.65);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]])
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='Twitter', markersize=4)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax5.xaxis.set_ticks(np.arange(0, featuredata[features[4]].max() + 4, 5))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(15, 33);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]])
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='Twitter', markersize=4)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax6.xaxis.set_ticks(np.arange(0, featuredata[features[5]].max() + 1, 2))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax6.set_xlim(6, 11.3);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]])
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='Twitter', markersize=4)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax7.xaxis.set_ticks(np.arange(0, featuredata[features[6]].max() + 0.5, 0.5))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax7.set_xlim(2, 3.5);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]])
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='Twitter', markersize=4)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax8.xaxis.set_ticks(np.arange(0, featuredata[features[7]].max() + 3, 6))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax8.set_xlim(25, 45);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]])
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='Twitter', markersize=4)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax9.xaxis.set_ticks(np.arange(0, featuredata[features[8]].max() + 4, 3))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(7, 16);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]])
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='Twitter', markersize=4)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax10.xaxis.set_ticks(np.arange(0, featuredata[features[9]].max() + 3, 5))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax10.set_xlim(14, 25);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]])
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='Twitter', markersize=4)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax11.xaxis.set_ticks(np.arange(0, featuredata[features[10]].max() + 6, 1))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax11.set_xlim(3, 6);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]])
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='Twitter', markersize=4)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=6)
ax12.xaxis.set_ticks(np.arange(0, featuredata[features[11]].max() + 1, 0.5))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax12.set_xlim(3, 4.5);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
#ax.tick_params(axis='y', labelsize=7)
else:
scale = 0.1
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(8,4))
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='European', markersize=3)
ax1.plot(comparisondata[features[0]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
if(x == 0):
continue
else:
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
for x, y in enumerate(comparisondata[features[0]]):
if(y == comparisondata[features[0]].max() or y == comparisondata[features[0]].min()):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax1.xaxis.set_ticks(np.arange(0, featuredata[features[0]].max() + 0.1, 0.1))
ax1.set_xlim(1.85, 2.2);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
ax1.legend(loc='lower center', bbox_to_anchor=(-0.6,0.9), prop={'size': 6})
print(featuredata[features[1]])
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='European', markersize=3)
ax2.plot(comparisondata[features[1]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[1]]):
if(y == comparisondata[features[1]].max() or y == comparisondata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax2.xaxis.set_ticks(np.arange(0, featuredata[features[1]].max() + 0.2, 0.1))
ax2.set_xlim(0.28, 0.53);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]])
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='European', markersize=3)
ax3.plot(comparisondata[features[2]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
if(x == 0):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[2]]):
if(y == comparisondata[features[2]].max() or y == comparisondata[features[2]].min()):
if(x == 0):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax3.xaxis.set_ticks(np.arange(0, featuredata[features[2]].max() + 10, 2))
ax3.set_xlim(19.5, 30.5);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]])
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='European', markersize=3)
ax4.plot(comparisondata[features[3]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[3]]):
if(y == comparisondata[features[3]].max() or y == comparisondata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax4.xaxis.set_ticks(np.arange(0, featuredata[features[3]].max() + 0.2, 0.1))
ax4.set_xlim(4.25, 4.82);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]])
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='European', markersize=3)
ax5.plot(comparisondata[features[4]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
if(x == 0):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[4]]):
if(y == comparisondata[features[4]].max() or y == comparisondata[features[4]].min()):
if(x == 0):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax5.xaxis.set_ticks(np.arange(0, featuredata[features[4]].max() + 4, 2))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(20.5, 26.2);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]])
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='European', markersize=3)
ax6.plot(comparisondata[features[5]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[5]]):
if(y == comparisondata[features[5]].max() or y == comparisondata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax6.xaxis.set_ticks(np.arange(0, featuredata[features[5]].max() + 3, 1))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax6.set_xlim(6, 8.6);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]])
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='European', markersize=3)
ax7.plot(comparisondata[features[6]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[6]]):
if(y == comparisondata[features[6]].max() or y == comparisondata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax7.xaxis.set_ticks(np.arange(0, featuredata[features[6]].max() + 0.5, 0.2))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax7.set_xlim(3.50, 4.1);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]])
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='European', markersize=3)
ax8.plot(comparisondata[features[7]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[7]]):
if(y == comparisondata[features[7]].max() or y == comparisondata[features[7]].min()):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax8.xaxis.set_ticks(np.arange(0, featuredata[features[7]].max() + 3, 1))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax8.set_xlim(17.6, 20.3);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]])
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='European', markersize=3)
ax9.plot(comparisondata[features[8]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[8]]):
if(y == comparisondata[features[8]].max() or y == comparisondata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax9.xaxis.set_ticks(np.arange(0, featuredata[features[8]].max() + 4, 1))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(20, 24.2);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]])
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='European', markersize=3)
ax10.plot(comparisondata[features[9]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[9]]):
if(y == comparisondata[features[9]].max() or y == comparisondata[features[9]].min()):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax10.xaxis.set_ticks(np.arange(0, featuredata[features[9]].max() + 3, 1))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax10.set_xlim(12, 14);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]])
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='European', markersize=3)
ax11.plot(comparisondata[features[10]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[10]]):
if(y == comparisondata[features[10]].max() or y == comparisondata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax11.xaxis.set_ticks(np.arange(0, featuredata[features[10]].max() + 6, 0.5))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax11.set_xlim(4, 5.8);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]])
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='European', markersize=3)
ax12.plot(comparisondata[features[11]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
if(x == 1):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[11]]):
if(y == comparisondata[features[11]].max() or y == comparisondata[features[11]].min()):
if(x == 1):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax12.xaxis.set_ticks(np.arange(0, featuredata[features[11]].max() + 1, 0.2))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax12.set_xlim(3.1, 3.9);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
ax.tick_params(axis='y', labelsize=7)
else:
scale = -0.4
fig, ((ax1, ax2, ax3, ax4), (ax5, ax6, ax7, ax8), (ax9, ax10, ax11, ax12)) = plt.subplots(3, 4, sharey=True, figsize=(8,4))
print(featuredata[features[0]].min(), featuredata[features[0]].max(), comparisondata[features[0]].min(), comparisondata[features[0]].max())
ax1.plot(featuredata[features[0]], featuredata[class_], '.', label='European', markersize=3)
ax1.plot(comparisondata[features[0]], comparisondata[class_], 'rx', label='non-European', markersize=3)
#ax1.hlines(y=data[class_], xmin=0, xmax=featuredata[features[0]], color='firebrick', alpha=0.7, linewidth=10)
ax1.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[0]]):
if(y == featuredata[features[0]].max() or y == featuredata[features[0]].min()):
if(x == 0):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[0]]):
if(y == comparisondata[features[0]].max() or y == comparisondata[features[0]].min()):
if(x == 0):
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax1.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax1.xaxis.set_ticks(np.arange(0, 2.8, 0.1))
ax1.set_xlim(1.65, 2.2);
ax1.set_ylim(-0.5, len(data[class_])+scale)
ax1.set_title(labels[0])
#ax1.legend(loc='upper center', bbox_to_anchor=(0.5,-0.1), prop={'size': 6})
ax1.legend(loc='lower center', bbox_to_anchor=(-0.6,0.9), prop={'size': 6})
print(featuredata[features[1]].min(), featuredata[features[1]].max(), comparisondata[features[1]].min(), comparisondata[features[1]].max())
ax2.plot(featuredata[features[1]], featuredata[class_], '.', label='European', markersize=3)
ax2.plot(comparisondata[features[1]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax2.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[1]]):
if(y == featuredata[features[1]].max() or y == featuredata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[1]]):
if(y == comparisondata[features[1]].max() or y == comparisondata[features[1]].min()):
ax2.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax2.xaxis.set_ticks(np.arange(0, 0.6, 0.02))
ax2.set_xlim(0.4, 0.485);
ax2.set_ylim(-0.5, len(data[class_])+scale)
ax2.set_title(labels[1])
print(featuredata[features[2]].min(), featuredata[features[2]].max(), comparisondata[features[2]].min(), comparisondata[features[2]].max())
ax3.plot(featuredata[features[2]], featuredata[class_], '.', label='European', markersize=3)
ax3.plot(comparisondata[features[2]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax3.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[2]]):
if(y == featuredata[features[2]].max() or y == featuredata[features[2]].min()):
if(x == 0):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[2]]):
if(y == comparisondata[features[2]].max() or y == comparisondata[features[2]].min()):
if(x == 0):
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
else:
ax3.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax3.xaxis.set_ticks(np.arange(0, 70, 6))
ax3.set_xlim(10, 30);
ax3.set_ylim(-0.5, len(data[class_])+scale)
ax3.set_title(labels[2])
print(featuredata[features[3]].min(), featuredata[features[3]].max(), comparisondata[features[3]].min(), comparisondata[features[3]].max())
ax4.plot(featuredata[features[3]], featuredata[class_], '.', label='European', markersize=3)
ax4.plot(comparisondata[features[3]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax4.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[3]]):
if(y == featuredata[features[3]].max() or y == featuredata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[3]]):
if(y == comparisondata[features[3]].max() or y == comparisondata[features[3]].min()):
ax4.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax4.xaxis.set_ticks(np.arange(0, 5, 0.1))
ax4.set_xlim(4.35, 4.75);
ax4.set_ylim(-0.5, len(data[class_])+scale)
ax4.set_title(labels[3])
print(featuredata[features[4]].min(), featuredata[features[4]].max(), comparisondata[features[4]].min(), comparisondata[features[4]].max())
ax5.plot(featuredata[features[4]], featuredata[class_], '.', label='European', markersize=3)
ax5.plot(comparisondata[features[4]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax5.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[4]]):
if(y == featuredata[features[4]].max() or y == featuredata[features[4]].min()):
if(x == 0):
ax5.text(y-0.2, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[4]]):
if(y == comparisondata[features[4]].max() or y == comparisondata[features[4]].min()):
if(x == 0):
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
else:
ax5.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax5.xaxis.set_ticks(np.arange(0, 30, 2))
ax5.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax5.set_xlim(22.5, 29.5);
ax5.set_ylim(-0.5, len(data[class_])+scale)
ax5.set_title(labels[4])
print(featuredata[features[5]].min(), featuredata[features[5]].max(), comparisondata[features[5]].min(), comparisondata[features[5]].max())
ax6.plot(featuredata[features[5]], featuredata[class_], '.', label='European', markersize=3)
ax6.plot(comparisondata[features[5]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax6.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[5]]):
if(y == featuredata[features[5]].max() or y == featuredata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[5]]):
if(y == comparisondata[features[5]].max() or y == comparisondata[features[5]].min()):
ax6.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax6.xaxis.set_ticks(np.arange(0, 15, 2))
ax6.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax6.set_xlim(6, 10.5);
ax6.set_ylim(-0.5, len(data[class_])+scale)
ax6.set_title(labels[5])
print(featuredata[features[6]].min(), featuredata[features[6]].max(), comparisondata[features[6]].min(), comparisondata[features[6]].max())
ax7.plot(featuredata[features[6]], featuredata[class_], '.', label='European', markersize=3)
ax7.plot(comparisondata[features[6]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax7.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[6]]):
if(y == featuredata[features[6]].max() or y == featuredata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[6]]):
if(y == comparisondata[features[6]].max() or y == comparisondata[features[6]].min()):
ax7.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax7.xaxis.set_ticks(np.arange(0, 10, 0.5))
ax7.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax7.set_xlim(2.9, 4.1);
ax7.set_ylim(-0.5, len(data[class_])+scale)
ax7.set_title(labels[6])
print(featuredata[features[7]].min(), featuredata[features[7]].max(), comparisondata[features[7]].min(), comparisondata[features[7]].max())
ax8.plot(featuredata[features[7]], featuredata[class_], '.', label='European', markersize=3)
ax8.plot(comparisondata[features[7]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax8.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[7]]):
if(y == featuredata[features[7]].max() or y == featuredata[features[7]].min()):
if(x == 0):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[7]]):
if(y == comparisondata[features[7]].max() or y == comparisondata[features[7]].min()):
if(x == 0):
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax8.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax8.xaxis.set_ticks(np.arange(0, 55, 6))
ax8.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax8.set_xlim(13, 44);
ax8.set_ylim(-0.5, len(data[class_])+scale)
ax8.set_title(labels[7])
print(featuredata[features[8]].min(), featuredata[features[8]].max(), comparisondata[features[8]].min(), comparisondata[features[8]].max())
ax9.plot(featuredata[features[8]], featuredata[class_], '.', label='European', markersize=3)
ax9.plot(comparisondata[features[8]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax9.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[8]]):
if(y == featuredata[features[8]].max() or y == featuredata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[8]]):
if(y == comparisondata[features[8]].max() or y == comparisondata[features[8]].min()):
ax9.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax9.xaxis.set_ticks(np.arange(0, 30, 3))
ax9.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax9.set_xlim(12, 24.5);
ax9.set_ylim(-0.5, len(data[class_])+scale)
ax9.set_title(labels[8])
print(featuredata[features[9]].min(), featuredata[features[9]].max(), comparisondata[features[9]].min(), comparisondata[features[9]].max())
ax10.plot(featuredata[features[9]], featuredata[class_], '.', label='European', markersize=3)
ax10.plot(comparisondata[features[9]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax10.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[9]]):
if(y == featuredata[features[9]].max() or y == featuredata[features[9]].min()):
if(x == 0):
ax10.text(y+0.3, x+0.3, round(y, 2), horizontalalignment='right', fontsize=5)
else:
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[9]]):
if(y == comparisondata[features[9]].max() or y == comparisondata[features[9]].min()):
if(x == 0):
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='left', fontsize=5)
else:
ax10.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax10.xaxis.set_ticks(np.arange(0, 40, 4))
ax10.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=0))
ax10.set_xlim(11, 22);
ax10.set_ylim(-0.5, len(data[class_])+scale)
ax10.set_title(labels[9])
print(featuredata[features[10]].min(), featuredata[features[10]].max(), comparisondata[features[10]].min(), comparisondata[features[10]].max())
ax11.plot(featuredata[features[10]], featuredata[class_], '.', label='European', markersize=3)
ax11.plot(comparisondata[features[10]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax11.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[10]]):
if(y == featuredata[features[10]].max() or y == featuredata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[10]]):
if(y == comparisondata[features[10]].max() or y == comparisondata[features[10]].min()):
ax11.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax11.xaxis.set_ticks(np.arange(0, 10, 0.5))
ax11.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax11.set_xlim(4.2, 5.7);
ax11.set_ylim(-0.5, len(data[class_])+scale)
ax11.set_title(labels[10])
print(featuredata[features[11]].min(), featuredata[features[11]].max(), comparisondata[features[11]].min(), comparisondata[features[11]].max())
ax12.plot(featuredata[features[11]], featuredata[class_], '.', label='European', markersize=3)
ax12.plot(comparisondata[features[11]], comparisondata[class_], 'rx', label='non-European', markersize=3)
ax12.grid(alpha=0.5, linestyle=':')
for x, y in enumerate(featuredata[features[11]]):
if(y == featuredata[features[11]].max() or y == featuredata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
for x, y in enumerate(comparisondata[features[11]]):
if(y == comparisondata[features[11]].max() or y == comparisondata[features[11]].min()):
ax12.text(y, x+0.3, round(y, 2), horizontalalignment='center', fontsize=5)
ax12.xaxis.set_ticks(np.arange(0, 7, 0.3))
ax12.xaxis.set_major_formatter(mtick.PercentFormatter(decimals=1))
ax12.set_xlim(3.25, 4.21);
ax12.set_ylim(-0.5, len(data[class_])+scale)
ax12.set_title(labels[11])
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=7)
ax.tick_params(axis='y', labelsize=7)
plt.tight_layout()
plt.savefig('visualization/'+source+'_plot_'+class_, dpi=300)
plt.close()
def plotConfusionMatrix(source):
origin = [
'Native',
'NonNative'
]
if(source == 'Combined' or source == 'CombinedNE'):
classes = ['Origin', 'Language', 'Language_Family']
lang = [
'Bulgarian',
'Croatian',
'Czech',
'Dutch',
'English',
'Finnish',
'French',
'German',
'Greek',
'Indian',
'Italian',
'Japanese',
'Lithuanian',
'Norwegian',
'Polish',
'Portuguese',
'Romanian',
'Russian',
'Serbian',
'Slovene',
'Spanish',
'Swedish',
'Turkish'
]
family = [
'Balto-Slavic',
'Germanic',
'Greek',
'Indo-Aryan',
'Japonic',
'Native',
'Romance',
'Turkic',
]
elif(source == 'Twitter'):
classes = ['Origin', 'Language_Family', 'Language']
lang = [
'English',
'French',
'German',
'Greek',
'Indian',
'Japanese',
'Russian',
'Turkish'
]
family = [
'Balto-Slavic',
'Germanic',
'Greek',
'Indo-Aryan',
'Japonic',
'Native',
'Romance',
'Turkic'
]
else:
classes = ['Origin', 'Language', 'Language_Family']
lang = [
'Bulgarian',
'Croatian',
'Czech',
'Dutch',
'English',
'Finnish',
'French',
'German',
'Italian',
'Lithuanian',
'Norwegian',
'Polish',
'Portuguese',
'Romanian',
'Russian',
'Serbian',
'Slovene',
'Spanish',
'Swedish'
]
family = [
'Balto-Slavic',
'Germanic',
'Native',
'Romance'
]
featuresets = ['Normal', 'TFIDF']
models = ['RandomForest', 'Pipeline', 'LogisticRegression', 'SVM']
fileList = []
for x in classes:
for feature in featuresets:
for model in models:
filename = 'classification/classification_report_'+source+'_'+x+'_'+feature+'_'+model+'.csv'
fileList.append([filename,x,feature,model])
output = pd.DataFrame({
'RandomForest':{
'Normal':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
},
'TFIDF':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
}
},
'Pipeline':{
'Normal':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
},
'TFIDF':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
}
},
'LogisticRegression':{
'Normal':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
},
'TFIDF':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
}
},
'SVM':{
'Normal':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
},
'TFIDF':{
'Language':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0 ,'std':0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Language_Family':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
},
'Origin':{
'accuracy': {'mean': 0,'median': 0,'std': 0},
'f1_macro': {'mean': 0,'median': 0,'std': 0},
'precision': {'mean':0,'median': 0,'std': 0},
'cv': {'mean':0,'median': 0 ,'std':0}
}
}
}
})
for file in fileList:
print(file[1])
print(file)
if(file[1] == 'Origin'):
labels = origin
title = 'Result for Origin prediction'
elif(file[1] == 'Language'):
title = 'Result for Language prediction'
labels = lang
elif(file[1] == 'Language_Family'):
labels = family
title = 'Result for Language Family prediction'
data = pd.read_csv(file[0], header=None, sep=',', skiprows=1)
data.columns = ['accuracy', 'f1_macro', 'f1_micro', 'precision', 'recall', 'prediction', 'actual', 'cv']
data = data[data.accuracy.str.contains('accuracy') == False]
scores = ['accuracy', 'f1_macro', 'precision', 'cv']
values = ['prediction', 'actual']
#
#accuracy = pd.to_numeric(data['accuracy'],downcast='float')
#f1 = pd.to_numeric(data['f1_macro'],downcast='float')
#prec = pd.to_numeric(data['precision'],downcast='float')
for score in scores:
output[file[3]][file[2]][file[1]][score]['mean'] = pd.to_numeric(data[score],downcast='float').mean()
output[file[3]][file[2]][file[1]][score]['median'] = | pd.to_numeric(data[score],downcast='float') | pandas.to_numeric |
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 6
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs = | pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
# iadd with TimdeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), | Timestamp('2000-2-29') | pandas.Timestamp |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import Sequence, to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.utils import class_weight
from sklearn.metrics import classification_report, confusion_matrix
def preprocessing_seqs(seqs,
alphabet="ACDEFGHIKLMNPQRSTVWY*",
maxlen=35,
padding="post",
truncating="post",
flatten=False):
"""
Sequences encoding schemes
"""
tokenizer = Tokenizer(num_words=len(alphabet),
char_level=True,
lower=False)
tokenizer.fit_on_texts(alphabet)
sequences = tokenizer.texts_to_sequences(seqs)
sequences = np.array(pad_sequences(sequences,
maxlen=maxlen,
padding=padding,
truncating=padding))
encoded_seqs = to_categorical(sequences, len(alphabet))
if flatten: # for Multilayer perceptron model
encoded_seqs = encoded_seqs.reshape(len(seqs), -1)
return encoded_seqs
def preprocessing_features(features, classes):
"""
encode given categorical features for each sample.
"""
encoded_features = label_binarize(features, classes=classes)
return encoded_features
def compute_class_weight(features, classes):
"""
Estimate class weights for unbalanced datasets
class weights will be given by n_samples / (n_classes * np.bincount(y))
"""
# features = [lable.strip() for lables in features for lable in lables.split(",")] # flatten
class_weights = class_weight.compute_class_weight('balanced',
classes,
features)
return class_weights
def preprocessing(seqs,
features,
alphabet,
classes,
maxlen,
flatten=False):
"""
wrapper for preprocessing seqs and features
"""
assert len(seqs) == len(features)
x = preprocessing_seqs(seqs=seqs,
alphabet=alphabet,
maxlen=maxlen,
flatten=flatten)
y = preprocessing_features(features=features, classes=classes)
return x, y
def preprocessing_dataset(file,
alphabet,
classes,
maxlen,
flatten=False):
"""
wrapper for preprocessing seqs and features from tsv file
"""
df = | pd.read_csv(file, sep="\t", header=0) | pandas.read_csv |
import unittest
import pandas as pd
from mavedbconvert import validators, constants, exceptions
class TestHGVSPatternsBackend(unittest.TestCase):
def setUp(self):
self.backend = validators.HGVSPatternsBackend()
def test_validate_hgvs_raise_HGVSValidationError(self):
with self.assertRaises(exceptions.HGVSValidationError):
self.backend.validate("p.1102A>G")
with self.assertRaises(exceptions.HGVSValidationError):
self.backend.validate("x.102A>G")
def test_validate_passes_on_special(self):
self.backend.validate(constants.enrich2_wildtype)
self.backend.validate(constants.enrich2_synonymous)
def test_returns_str_variant(self):
self.assertIsInstance(self.backend.validate("c.1A>G"), str)
class TestValidateHGVS(unittest.TestCase):
def test_uses_patterns_backend_as_default(self):
result = validators.validate_variants(["c.[1A>G;2A>G]"], n_jobs=2, verbose=0)
self.assertIsInstance(result[0], str)
def test_can_specify_backend(self):
backend = validators.HGVSPatternsBackend()
result = validators.validate_variants(
["c.[1A>G;2A>G]"], n_jobs=2, verbose=0, validation_backend=backend
)
self.assertIsInstance(result[0], str)
class TestDfValidators(unittest.TestCase):
def test_validate_column_raise_keyerror_column_not_exist(self):
df = pd.DataFrame({"a": [1]})
with self.assertRaises(KeyError):
validators.validate_has_column(df, "b")
def test_validate_column_passes_when_column_exists(self):
df = pd.DataFrame({"a": [1]})
validators.validate_has_column(df, "a")
def test_error_some_values_non_numeric(self):
df = pd.DataFrame({"A": ["a", 1, 2]})
with self.assertRaises(TypeError):
validators.validate_columns_are_numeric(df)
def test_pass_all_numeric(self):
df = pd.DataFrame({"A": [1, 2, 1.0]})
validators.validate_columns_are_numeric(df)
class TestHGVSValidators(unittest.TestCase):
def test_validate_hgvs_uniqueness(self):
df = pd.DataFrame({constants.nt_variant_col: ["a", "b"]})
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col) # Should pass
df = pd.DataFrame({constants.nt_variant_col: ["a", "b", "a"]})
with self.assertRaises(ValueError):
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col)
# test multi-variant formatting
df = pd.DataFrame({constants.nt_variant_col: list("abcdefg" * 2)})
with self.assertRaises(ValueError) as cm:
validators.validate_hgvs_uniqueness(df, constants.nt_variant_col)
self.assertTrue(str(cm.exception).endswith(", ..."))
def test_validate_hgvs_uniqueness_bad_column(self):
df = | pd.DataFrame({constants.nt_variant_col: ["a", "b", "a"]}) | pandas.DataFrame |
from __future__ import print_function, division
import os
import re
import datetime
import sys
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import yaml
import psycopg2 as db
from nilmtk.measurement import measurement_columns
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.datastore import Key
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
"""
MANUAL:
dataport is a large dataset hosted in a remote SQL database. This
file provides a function to download the dataset and save it to disk
as NILMTK-DF. Since downloading the entire dataset will likely take >
24 hours, this function provides some options to allow you to download
only a subset of the data.
For example, to only load house 26 for April 2014:
from nilmtk.dataset_converters.dataport.download_dataport import download_dataport
download_dataport(
'username',
'password',
'/path/output_filename.h5',
periods_to_load={26: ('2014-04-01', '2014-05-01')}
)
REQUIREMENTS:
On Ubuntu:
* sudo apt-get install libpq-dev
* sudo pip install psycopg2
TODO:
* intelligently handle queries that fail due to network
* integrate 'grid' (use - gen) and 'gen'
"""
feed_mapping = {
'use': {},
'air1': {'type': 'air conditioner'},
'air2': {'type': 'air conditioner'},
'air3': {'type': 'air conditioner'},
'airwindowunit1': {'type': 'air conditioner'},
'aquarium1': {'type': 'appliance'},
'bathroom1': {'type': 'sockets', 'room': 'bathroom'},
'bathroom2': {'type': 'sockets', 'room': 'bathroom'},
'bedroom1': {'type': 'sockets', 'room': 'bedroom'},
'bedroom2': {'type': 'sockets', 'room': 'bedroom'},
'bedroom3': {'type': 'sockets', 'room': 'bedroom'},
'bedroom4': {'type': 'sockets', 'room': 'bedroom'},
'bedroom5': {'type': 'sockets', 'room': 'bedroom'},
'car1': {'type': 'electric vehicle'},
'clotheswasher1': {'type': 'washing machine'},
'clotheswasher_dryg1': {'type': 'washer dryer'},
'diningroom1': {'type': 'sockets', 'room': 'dining room'},
'diningroom2': {'type': 'sockets', 'room': 'dining room'},
'dishwasher1': {'type': 'dish washer'},
'disposal1': {'type': 'waste disposal unit'},
'drye1': {'type': 'spin dryer'},
'dryg1': {'type': 'spin dryer'},
'freezer1': {'type': 'freezer'},
'furnace1': {'type': 'electric furnace'},
'furnace2': {'type': 'electric furnace'},
'garage1': {'type': 'sockets', 'room': 'dining room'},
'garage2': {'type': 'sockets', 'room': 'dining room'},
'gen': {},
'grid': {},
'heater1': {'type': 'electric space heater'},
'housefan1': {'type': 'electric space heater'},
'icemaker1': {'type': 'appliance'},
'jacuzzi1': {'type': 'electric hot tub heater'},
'kitchen1': {'type': 'sockets', 'room': 'kitchen'},
'kitchen2': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp1': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp2': {'type': 'sockets', 'room': 'kitchen'},
'lights_plugs1': {'type': 'light'},
'lights_plugs2': {'type': 'light'},
'lights_plugs3': {'type': 'light'},
'lights_plugs4': {'type': 'light'},
'lights_plugs5': {'type': 'light'},
'lights_plugs6': {'type': 'light'},
'livingroom1': {'type': 'sockets', 'room': 'living room'},
'livingroom2': {'type': 'sockets', 'room': 'living room'},
'microwave1': {'type': 'microwave'},
'office1': {'type': 'sockets', 'room': 'office'},
'outsidelights_plugs1': {'type': 'sockets', 'room': 'outside'},
'outsidelights_plugs2': {'type': 'sockets', 'room': 'outside'},
'oven1': {'type': 'oven'},
'oven2': {'type': 'oven'},
'pool1': {'type': 'electric swimming pool heater'},
'pool2': {'type': 'electric swimming pool heater'},
'poollight1': {'type': 'light'},
'poolpump1': {'type': 'electric swimming pool heater'},
'pump1': {'type': 'appliance'},
'range1': {'type': 'stove'},
'refrigerator1': {'type': 'fridge'},
'refrigerator2': {'type': 'fridge'},
'security1': {'type': 'security alarm'},
'shed1': {'type': 'sockets', 'room': 'shed'},
'sprinkler1': {'type': 'appliance'},
'unknown1': {'type': 'unknown'},
'unknown2': {'type': 'unknown'},
'unknown3': {'type': 'unknown'},
'unknown4': {'type': 'unknown'},
'utilityroom1': {'type': 'sockets', 'room': 'utility room'},
'venthood1': {'type': 'appliance'},
'waterheater1': {'type': 'electric water heating appliance'},
'waterheater2': {'type': 'electric water heating appliance'},
'winecooler1': {'type': 'appliance'},
}
feed_ignore = ['gen', 'grid']
def download_dataport(database_username, database_password,
hdf_filename, periods_to_load=None):
"""
Downloads data from dataport database into an HDF5 file.
Parameters
----------
hdf_filename : str
Output HDF filename. If file exists already then will be deleted.
database_username, database_password : str
periods_to_load : dict of tuples, optional
Key of dict is the building number (int).
Values are (<start date>, <end date>)
e.g. ("2013-04-01", None) or ("2013-04-01", "2013-08-01")
defaults to all buildings and all date ranges
"""
# dataport database settings
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
database_schema = 'university'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# set up a new HDF5 datastore (overwrites existing store)
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='zlib')
# remove existing building yaml files in module dir
for f in os.listdir(join(_get_module_directory(), 'metadata')):
if re.search('^building', f):
os.remove(join(_get_module_directory(), 'metadata', f))
"""
TODO:
The section below can be altered or removed, since the restructured Dataport
now has only one electricity_egauge_minutes table.
"""
# get tables in database schema
sql_query = ("SELECT table_name" +
" FROM information_schema.views" +
" WHERE table_schema ='" + database_schema + "'" +
" ORDER BY table_name")
database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()
database_tables = [t for t in database_tables if 'electricity_egauge_minutes' in t]
# if user has specified buildings
if periods_to_load:
buildings_to_load = list(periods_to_load.keys())
else:
# get buildings present in all tables
sql_query = ''
for table in database_tables:
sql_query = (sql_query + '(SELECT DISTINCT dataid' +
' FROM "' + database_schema + '".' + table +
') UNION ')
sql_query = sql_query[:-7]
sql_query = (sql_query + ' ORDER BY dataid')
buildings_to_load = pd.read_sql(sql_query, conn)['dataid'].tolist()
# for each user specified building or all buildings in database
for building_id in buildings_to_load:
print("Loading building {:d} @ {}"
.format(building_id, datetime.datetime.now()))
sys.stdout.flush()
# create new list of chunks for concatenating later
dataframe_list = []
# for each table of 1 month data
for database_table in database_tables:
print(" Loading table {:s}".format(database_table))
sys.stdout.flush()
# get buildings present in electricity_egauge_minutes table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
' WHERE egauge_min_time IS NOT NULL' +
' ORDER BY dataid')
buildings_in_table = pd.read_sql(sql_query, conn)['dataid'].tolist()
if building_id in buildings_in_table:
# get first and last timestamps for this house in electricity_egauge_minutes table
sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +
' MAX(egauge_max_time) AS maxlocalminute' +
' FROM university.metadata' +
' WHERE dataid=' + str(building_id))
range = | pd.read_sql(sql_query, conn) | pandas.read_sql |
import requests
import json
import arrow
from datetime import datetime
from requests.auth import HTTPBasicAuth
import numpy as np
import pandas as pd
from datetime import date, datetime, timedelta as td
#######################
#### aTimeLogger #####
######################
# Modified from https://github.com/YujiShen/TimeReport/blob/master/time_api.py
def get_types(auth_header):
"""
Retrieve types data from aTimeLogger.
:param auth_header: auth header for request data.
:return: A dataframe for types data.
"""
r_type = requests.get("https://app.atimelogger.com/api/v2/types",
auth=auth_header)
types = json.loads(r_type.text)
tdf = pd.DataFrame.from_dict(types['types'])
return tdf
def get_intervals(auth_header, start_date, end_date, timezone):
"""
Retrieve new intervals data from aTimeLogger. New intervals is defined by start and end.
:param auth_header: auth header for request data.
:return: A dataframe for intervals data.
Limited to 100 entries regardless of date range
"""
start_datetime = arrow.get( | pd.to_datetime(start_date) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file makes the Supplementary Figure 5, it needs the filter_SRAG.py
results to run.
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
data_init = pd.read_csv('../Data/SRAG_filtered_morb.csv')
data_init = data_init[(data_init.EVOLUCAO==1)|(data_init.EVOLUCAO==2)]
for col in data_init.columns:
if (col[:2] == 'DT') or (col[:4] == 'DOSE'):
data_init.loc[:,col] = pd.to_datetime(data_init[col], format='%Y/%m/%d', errors='coerce')
data_init['ti'] = (data_init.DT_EVOLUCA - data_init.DT_INTERNA).dt.days
cases, td = np.histogram(data_init.ti, bins=np.arange(0, 90))
deaths, td = np.histogram(data_init.ti[data_init.EVOLUCAO==2], bins=np.arange(0, 90))
td = td[:-1]
plt.figure()
plt.plot(td, deaths/cases)
plt.ylabel('Mortality')
plt.xlabel('Stay Duration (days)')
plt.xlim([-0.5,89])
plt.ylim([0.2,0.7])
plt.grid()
plt.tight_layout()
s = {'days': td, 'mortality': deaths/cases}
s = | pd.DataFrame(s) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.automl import get_default_primary_search_objective
from evalml.data_checks import (
DataCheckAction,
DataCheckActionCode,
DataCheckError,
DataCheckMessageCode,
DataChecks,
DataCheckWarning,
InvalidTargetDataCheck,
)
from evalml.exceptions import DataCheckInitError
from evalml.objectives import (
MAPE,
MeanSquaredLogError,
RootMeanSquaredLogError,
)
from evalml.problem_types import (
ProblemTypes,
is_binary,
is_multiclass,
is_regression,
)
from evalml.utils.woodwork_utils import numeric_and_boolean_ww
invalid_targets_data_check_name = InvalidTargetDataCheck.name
def test_invalid_target_data_check_invalid_n_unique():
with pytest.raises(
ValueError, match="`n_unique` must be a non-negative integer value."
):
InvalidTargetDataCheck(
"regression",
get_default_primary_search_objective("regression"),
n_unique=-1,
)
def test_invalid_target_data_check_nan_error():
X = pd.DataFrame({"col": [1, 2, 3]})
invalid_targets_check = InvalidTargetDataCheck(
"regression", get_default_primary_search_objective("regression")
)
assert invalid_targets_check.validate(X, y=pd.Series([1, 2, 3])) == {
"warnings": [],
"errors": [],
"actions": [],
}
assert invalid_targets_check.validate(X, y=pd.Series([np.nan, np.nan, np.nan])) == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is either empty or fully null.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,
details={},
).to_dict(),
],
"actions": [],
}
def test_invalid_target_data_check_numeric_binary_classification_valid_float():
y = pd.Series([0.0, 1.0, 0.0, 1.0])
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [],
"actions": [],
}
def test_invalid_target_data_check_multiclass_two_examples_per_class():
y = pd.Series([0] + [1] * 19 + [2] * 80)
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
"multiclass", get_default_primary_search_objective("binary")
)
expected_message = "Target does not have at least two instances per class which is required for multiclass classification"
# with 1 class not having min 2 instances
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=expected_message,
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,
details={"least_populated_class_labels": [0]},
).to_dict()
],
"actions": [],
}
y = pd.Series([0] + [1] + [2] * 98)
X = pd.DataFrame({"col": range(len(y))})
# with 2 classes not having min 2 instances
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message=expected_message,
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,
details={"least_populated_class_labels": [0, 1]},
).to_dict()
],
"actions": [],
}
@pytest.mark.parametrize(
"pd_type", ["int16", "int32", "int64", "float16", "float32", "float64", "bool"]
)
def test_invalid_target_data_check_invalid_pandas_data_types_error(pd_type):
y = pd.Series([0, 1, 0, 0, 1, 0, 1, 0])
y = y.astype(pd_type)
X = pd.DataFrame({"col": range(len(y))})
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [],
"actions": [],
}
y = pd.Series(pd.date_range("2000-02-03", periods=5, freq="W"))
X = pd.DataFrame({"col": range(len(y))})
unique_values = y.value_counts().index.tolist()
assert invalid_targets_check.validate(X, y) == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is unsupported {} type. Valid Woodwork logical types include: {}".format(
"Datetime",
", ".join([ltype for ltype in numeric_and_boolean_ww]),
),
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE,
details={"unsupported_type": "datetime"},
).to_dict(),
DataCheckError(
message="Binary class targets require exactly two unique values.",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,
details={"target_values": unique_values},
).to_dict(),
],
"actions": [],
}
def test_invalid_target_y_none():
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
assert invalid_targets_check.validate(pd.DataFrame(), y=None) == {
"warnings": [],
"errors": [
DataCheckError(
message="Target is None",
data_check_name=invalid_targets_data_check_name,
message_code=DataCheckMessageCode.TARGET_IS_NONE,
details={},
).to_dict()
],
"actions": [],
}
def test_invalid_target_data_input_formats():
invalid_targets_check = InvalidTargetDataCheck(
"binary", get_default_primary_search_objective("binary")
)
# test empty pd.Series
X = pd.DataFrame()
messages = invalid_targets_check.validate(X, | pd.Series() | pandas.Series |
from __future__ import absolute_import
import random
import time
import logbook
import pandas as pd
import requests
from cnswd.websource.base import friendly_download
from cnswd.websource._selenium import make_headless_browser
log = logbook.Logger('提取成交明细网页数据')
BASE_URL_FMT = 'http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php?symbol={symbol}&date={date_str}'
DATE_FMT = '%Y-%-m-%-d' # 不填充0
def _add_prefix(stock_code):
"""查询代码"""
pre = stock_code[0]
if pre == '6':
return 'sh{}'.format(stock_code)
else:
return 'sz{}'.format(stock_code)
def _to_str(date):
"""转换为查询日期格式"""
# Visual Studio 不能直接处理
# return pd.Timestamp(date).strftime(DATE_FMT)
dt_stru = pd.Timestamp(date).timetuple()
return str(dt_stru.tm_year) + '-' + str(dt_stru.tm_mon) + '-' + str(dt_stru.tm_mday)
def _query_url(code, date):
"""查询url"""
symbol = _add_prefix(code)
date_str = _to_str(date)
return BASE_URL_FMT.format(symbol=symbol, date_str=date_str)
def _fix_data(df, code, date):
"""整理数据框"""
df.columns = ['成交时间', '成交价', '价格变动', '成交量', '成交额', '性质']
date_str = _to_str(date)
df.成交时间 = df.成交时间.map(lambda x: pd.Timestamp('{} {}'.format(date_str, x)))
df['股票代码'] = code
# df['涨跌幅'] = df['涨跌幅'].str.replace('%', '').astype(float) * 0.01
df['成交量'] = df['成交量'] * 100
df = df.sort_values('成交时间')
return df
def _get_cjmx_1(code, date):
url_fmt = 'http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php?symbol={symbol_}&date={date_str}&page={page}'
dfs = []
symbol_ = _add_prefix(code)
d = pd.Timestamp(date)
if d < pd.Timestamp('today').normalize() - | pd.Timedelta(days=20) | pandas.Timedelta |
# coding: utf-8
# # Laterality Curves
# ### import modules
#
## In[1]:
#
#get_ipython().magic(u'matplotlib inline')
#
#
# In[2]:
from nilearn import input_data, image, plotting
import os
import sys
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# ### get absolute directory of project
# In[3]:
# after converstion to .py, we can use __file__ to get the module folder
try:
thisDir = os.path.realpath(__file__)
# in notebook form, we take the current working directory (we need to be in 'notebooks/' for this!)
except:
thisDir = '.'
# convert relative path into absolute path, so this will work with notebooks and py modules
supDir = os.path.abspath(os.path.join(os.path.dirname(thisDir), '..'))
supDir
# ### get tMap
#
## In[4]:
#
#tDf = pd.DataFrame([
# '%s/data/raw/nii/%s' % (supDir, x)
# for x in os.listdir('%s/data/raw/nii' % supDir) if x.startswith('tMap')
#])
#tDf.columns = ['tMaps']
#tDf.index = [x.split('tMap_')[-1].split('.')[0] for x in tDf['tMaps']]
#tDf.sort_index(inplace=True)
#
#
#
## In[5]:
#
#tDf.shape
#
#
#
## In[6]:
#
#tDf.tail()
#
#
#
## In[7]:
#
#tMap = tDf.iloc[35].tMaps
#
#
#
## In[8]:
#
#tMap
#
#
# Activity map in native space on brain-extracted structural image
#
## In[10]:
#
#plotting.plot_stat_map(tMap,threshold=0);
#
#
# ### make a mask based on the stats map
# We use a masker which encompasses all valid voxel in the tMap
# In[11]:
masker = input_data.NiftiMasker(mask_img='%s/data/external/MNI152_T1_2mm_brain_mask.nii.gz'%supDir).fit()
#
## In[12]:
#
#plotting.plot_roi(masker.mask_img_)
#
#
# ### get the language ROIs
# In[13]:
languageLeft = '%s/models/tMap_diff_left.nii.gz' % supDir
languageRight = '%s/models/tMap_diff_right.nii.gz' % supDir
#
## In[15]:
#
#plotting.plot_stat_map(languageLeft,draw_cross=False,cut_coords=(-45,15,15));
#
#
# ### binarize the language ROI at a specific threshold
# In[16]:
def makeBinMaskAbs(roi,thresh,masker=masker):
# turn roi image into array
data = masker.transform( roi )
# binarize at threshold
data[data>=thresh] = 1
data[data!=1] = 0
# turn array back to image
img = masker.inverse_transform(data)
return img
#
## In[17]:
#
#d = {}
#for pc in np.linspace(1,15,100):
# threshRoi = makeBinMaskAbs(languageLeft,pc)
# a = threshRoi.get_data()
# d[pc] = a[a>0].shape[-1]
#
#
#
## In[18]:
#
#pd.DataFrame(d,index=['#voxel']).T.plot()
#
#
# In[19]:
def makeBinMask(roi,pc,masker=masker):
# turn roi image into array
data = masker.transform( roi )
# get rid of negative values
cutData = data.copy()
cutData = cutData[cutData>0.01]
# get score corresponding to percentile
thresh = np.percentile(cutData,pc)
# binarize at threshold
data[data>=thresh] = 1
data[data!=1] = 0
# turn array back to image
img = masker.inverse_transform(data)
return img
# Here, it is important that we use percentiles, because we would like to modulate ROI size in a linear fashion, but if we use absolute values for thresholding this will not be the case (i.e. it is harder to cross higher thresholds, so 10 and 11 for example would be nearly the same while 1 and 2 would be very different). Also, it is good to keep the smalles ROis still relatively large, as we are only counting voxel and counting doesn't make sense for small ROIs.
#
## In[20]:
#
#d = {}
#for pc in np.linspace(0,100,100):
# threshRoi = makeBinMask(languageLeft,pc)
# a = threshRoi.get_data()
# d[pc] = a[a>0].shape[-1]
#pd.DataFrame(d,index=['#voxel']).T.plot()
#
#
# Example:
#
## In[19]:
#
#for thresh in np.arange(0,100,5):
# threshRoi = makeBinMask(languageLeft,thresh)
# plotting.plot_roi(threshRoi,
# bg_img='%s/data/external/ch2better.nii.gz' % supDir,
# draw_cross=False,
# cut_coords=(-45,15,15),
# black_bg=False,
# colorbar=False,
# title=thresh)
# plt.show()
#
#
#
## In[22]:
#
#fig = plt.figure(figsize=(7,11))
#
#ax = plt.subplot(3,1,1)
#ax = plotting.plot_stat_map('%s/data/processed/nii/tMap_diff_values.nii.gz' % supDir,
# bg_img='%s/data/external/ch2better.nii.gz' % supDir,
# draw_cross=False,
# cut_coords=(-45,15,15),
# black_bg=False,
# colorbar=False,
# axes=ax,
# title='difference map');
#
#for i,thresh in enumerate([0,95]):
# ax = plt.subplot(3,1,i+2)
# threshRoi = makeBinMask(languageLeft,thresh)
# ax = plotting.plot_roi(threshRoi,
# bg_img='%s/data/external/ch2better.nii.gz' % supDir,
# draw_cross=False,
# cut_coords=(-45,15,15),
# black_bg=False,
# title='percentile %s'%thresh,
# axes=ax)
#plt.close()
#
#
#
## In[23]:
#
#fig
#
#
#
## In[24]:
#
#fig.savefig('%s/reports/figures/04-roi-generation.png' % supDir,dpi=300,bbox_inches='tight')
#
#
# ### get ROI voxels into dataFrame
# In[25]:
def makeRoiDf(tFile,roiFile):
# get the range of valid voxel
tBinIm = image.math_img('abs(img1)>0.001',img1=tFile)
interimMasker = input_data.NiftiMasker(mask_img=tBinIm)
interimMasker.fit()
# get the overlap of valid voxel and the mask
roiMask = interimMasker.inverse_transform(interimMasker.transform(roiFile))
# initialize the final masker which is the overlap of tMap and roiFile
masker = input_data.NiftiMasker(mask_img=roiMask).fit()
# get data into array
a = masker.transform( [tFile] )
return a
#
## In[26]:
#
#makeRoiDf(tMap,threshRoi)
#
#
# ### do everything for one ROI
# In[27]:
def getRoiData(tMap,roi,thresh):
# binarize the language ROI using the masker
threshRoi = makeBinMask(roi,thresh)
# use the binary language ROI as a masker for extracting the tMap data
roiData = makeRoiDf(tMap,threshRoi)[-1]
return roiData
#
## In[28]:
#
#roiData = getRoiData(tMap,languageLeft,1)
#
#
#
## In[29]:
#
#roiData
#
#
# ### count number of above-threshold voxel in ROI for a range of thresholds
#
# here, thresholding is done in 0.1 steps; output is the percentage of above-threshold voxel (relative to ROi size)
# In[30]:
def getThresh(roiData,sideName):
# empty dict to write to
d = {}
# number of voxel in ROI
num_total = roiData.shape[-1]
# fixed range of values (it does not make sense to go outside these boundaries)
myMin = -10.0
myMax = 20.0
# loop through the whole range of t-Values in the ROI (from min to max)
for threshold in np.arange(myMin,myMax,0.1):
# find all above-threshold voxel and count them
num_above = np.where(roiData>threshold)[-1].shape[-1]
# get the percentage by dividing through total number of voxel in ROI
percent_above = num_above/float(num_total)*100
# in the dict, each key is a threshold and its value is the number of voxel above that threshold
d[round(threshold,1)] = percent_above
return pd.DataFrame(d,index=[sideName]).T
# Example:
#
## In[31]:
#
#thisDf = getThresh(roiData,'left')
#
#
#
## In[32]:
#
#thisDf.T
#
#
#
## In[33]:
#
#plt.plot( thisDf );
#plt.xlabel('t Value')
#plt.ylabel('% voxel above threshold')
#plt.title('thresholding in left-sided language ROI')
#plt.show()
#
#
# ### do this for both ROIs
# Adjust ranges
# In[34]:
def fillMissing(df):
'''
this makes sure that we do not have nans
'''
mergeDf = df.copy()
# loop through all ROIs
for roi in ['left','right']:
# get the max and min value
thisMax = mergeDf.idxmax()[roi]
thisMin = mergeDf.idxmin()[roi]
# fill everything above the max value with 100
mergeDf[roi].loc[:thisMax].fillna(100.,inplace=True)
# fill everything below the min value with 0
mergeDf[roi].loc[thisMin:].fillna(0.,inplace=True)
return mergeDf
# Combine
# In[35]:
def makeParticipant(tMap,pc):
roiData = getRoiData(tMap,languageLeft,pc)
threshDataLeft = getThresh(roiData,'left')
roiData = getRoiData(tMap,languageRight,pc)
threshDataRight = getThresh(roiData,'right')
allRoiDf = | pd.concat([threshDataLeft,threshDataRight],axis=1) | pandas.concat |
# coding=utf-8
import numpy as np
from scipy.stats import norm
import pandas as pd
def LHS_norm(N, mean, cv):
"""
:param std:数据标准差
:param mean:数据均值
:param N:拉丁超立方层数
:return:样本数据
"""
result = np.empty([N])
d = 1.0 / N
for j in range(N):
result[j] = np.random.uniform(low=j * d, high=(j + 1) * d, size=1)[0]
np.random.shuffle(result)
result = norm.ppf(result)
result = pd.Series(result * cv * mean + mean)
return result
if __name__ == '__main__':
job = []
n = 300
for i in range(100, 300):
job.append('job%d'%(i+1))
job = | pd.Series(job) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@title: Non-Exhaustive Gaussian Mixture Generative Adversarial Networks (NE-GM-GAN)
@topic: Generate qualified dataset from raw data
@author: <NAME>, <NAME>
@run: python gen_Data.py KDD99 ../data/
"""
import os
import sys
import numpy as np
import pandas as pd
from utils import compute_loss, dump_pickle
# For Network Intrusion Dataset -----
def onehot_embedding(df, name):
# Employ one-hot embedding on categorical values (i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
dummies = pd.get_dummies(df.loc[:,name]) # one-hot embedding for selected columns
for x in dummies.columns:
dummy_name = "{}-{}".format(name, x) # rename
df.loc[:, dummy_name] = dummies[x] # add new columns to the dataframe
df.drop(name, axis=1, inplace=True) # remove original columns
def update_label(df_data, inliers, outliers, label_name="labels"):
# Replace the string with integer
labels = df_data[label_name].copy()
labels = np.array(labels)
for l in range(len(list(labels))):
if labels[l] in outliers:
for c in range(len(outliers)):
if labels[l] == outliers[c]:
labels[l] = c+1
elif labels[l] in inliers:
labels[l] = 0
df_data[label_name] = labels
return df_data
def split_network_data(df_data, label_name="label", test_ratio=0.2, seed=42):
# Split the train/test data on network intrusion dataset
df_inliers = df_data[df_data[label_name] == 0]
df_outliers = df_data[df_data[label_name] != 0]
df_test1 = df_inliers.sample(frac=test_ratio, random_state=seed)
df_test = pd.concat([df_test1, df_outliers], axis=0)
df_train = df_inliers[~df_inliers.index.isin(df_test1.index)]
print("The Shape of Train/Test: {0}, {1}.".format(df_train.shape, df_test.shape))
return df_train, df_test
def generate_KDD99(data_path, LABEL_NAME="label"):
# Generate KDD99 dataset
# Read dataset
col_names = ["duration","protocol_type","service","flag","src_bytes", \
"dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins", \
"logged_in","num_compromised","root_shell","su_attempted","num_root", \
"num_file_creations","num_shells","num_access_files","num_outbound_cmds", \
"is_host_login","is_guest_login","count","srv_count","serror_rate", \
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate", \
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count", \
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate", \
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate", \
"dst_host_rerror_rate","dst_host_srv_rerror_rate","label"]
df_data = pd.read_csv(data_path, header=None, names=col_names)
# Implement one-hot embedding
text_l = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'is_host_login', 'is_guest_login']
for name in text_l:
onehot_embedding(df_data, name)
# Rename the order of labels for outliers
outliers = ['neptune.', 'normal.', 'back.', 'satan.', 'ipsweep.', 'portsweep.', 'warezclient.', 'teardrop.']
labels = df_data[LABEL_NAME].copy()
for l in range(len(labels)):
if labels[l] in outliers:
for c in range(len(outliers)):
if labels[l] == outliers[c]:
labels[l] = c+1
else:
labels[l] = 0
df_data[LABEL_NAME] = labels
# Split the train, val, test set
df_train, df_test = split_network_data(df_data, label_name=LABEL_NAME, test_ratio=0.2, seed=42)
return df_train, df_test
def generate_NSLKDD(data_path, LABEL_NAME="labels"):
# Generate NSL-KDD dataset
# Read dataset
col_names = ["duration","protocol_type","service","flag","src_bytes",
"dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
"logged_in","num_compromised","root_shell","su_attempted","num_root",
"num_file_creations","num_shells","num_access_files","num_outbound_cmds",
"is_host_login","is_guest_login","count","srv_count","serror_rate",
"srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
"diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
"dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
"dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
"dst_host_rerror_rate","dst_host_srv_rerror_rate","labels","labels_int"] # len = 43
df_train = pd.read_csv(data_path+"Train.txt", header=None, names=col_names)
df_test = pd.read_csv(data_path+"Test.txt", header=None, names=col_names)
df_data = pd.concat([df_train, df_test], ignore_index=True)
df_data.drop('labels_int', axis=1, inplace=True)
# Drop columns whose binary values are almost 0
df_data_ohe = df_data.copy() # copy the dataframe for encoding
columns_drop_list = ['land', 'num_outbound_cmds', 'is_host_login']
for col in columns_drop_list:
df_data_ohe.drop(col, axis=1, inplace=True)
# Select features for one-hot embedding
columns_encoding_list = ['protocol_type', 'service', 'flag', 'logged_in', 'is_guest_login']
for col in columns_encoding_list:
onehot_embedding(df_data_ohe, col)
# Decide the classes of inliers & outliers
inliers = ['normal', 'buffer_overflow', 'land', 'multihop', 'rootkit', 'named', \
'ps', 'sendmail', 'xterm', 'imap', 'ftp_write', 'loadmodule', 'xlock', \
'phf', 'perl', 'xsnoop', 'worm', 'udpstorm', 'spy', 'sqlattack']
outliers = ['neptune', 'satan', 'ipsweep', 'smurf', 'portsweep', 'nmap', 'back', 'guess_passwd']
class_ = inliers + outliers
# Extract classes and reform the new dataframe
df_data = df_data_ohe[df_data_ohe[LABEL_NAME].isin(class_)]
# Replace the string with integer
df_data = update_label(df_data, inliers, outliers, label_name=LABEL_NAME)
# Split dataset into train/test set
df_train, df_test = split_network_data(df_data, label_name=LABEL_NAME, test_ratio=0.2, seed=42)
return df_train, df_test
def generate_UNSWNB15(data_path, LABEL_NAME="attack_cat"):
# Generate UNSW-NB15 dataset
# Read dataset
df_data = | pd.read_csv(data_path+"train.csv") | pandas.read_csv |
########################################################################
# Copyright 2020 Battelle Energy Alliance, LLC ALL RIGHTS RESERVED #
# Mobility Systems & Analytics Group, Idaho National Laboratory #
########################################################################
import pyodbc
import pandas as pd
import pickle
import datetime
import time
import math
import yaml
#import geopandas
#import shapely
from pathlib import Path
import csv
import numpy as np
from sklearn.cluster import DBSCAN
from shapely import geometry
from shapely.geometry import MultiPoint
from haversine import haversine, Unit
import pynput
class cfg():
with open('locationGeneralizer.yml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
odbcConnectionString=config['odbcConnectionString']
inputTableOrCSV= config['inputTableOrCSV']
vehiclesInChunk = config['vehiclesInChunk']
qryVehicleIDList =config['qryVehicleIDList']
qryVehicleInfo = config['qryVehicleInfo']
qryVehicleIDList = qryVehicleIDList.replace('{inputsrc}', inputTableOrCSV)
qryVehicleInfo = qryVehicleInfo.replace('{inputsrc}', inputTableOrCSV)
errorLogFileName = config['errorLogFileName']
heartbeatFileName = config['heartbeatFileName']
locationInfoFileName = config['locationInfoFileName']
homeInfoFileName = config['homeInfoFileName']
pklCensusDivisionsFileName = config['pklCensusDivisionsFileName']
evseLookupFileName = config['evseLookupFileName']
bboxes = config['boundingBoxes']
gpsOdoThreshold_mi = config['gpsOdoThreshold_mi']
minTrips = config['minTrips']
minLastTrips = config['minLastTrips']
minPctParks = config['minPctParks']
distancePlaces = config['distancePlaces']
dayEndHours = config['dayEndHours']
dayEndMinutes = config['dayEndMinutes']
dbscan_eps_ft = config['dbscan_eps_ft']
dbscan_min_spls = config['dbscan_min_spls']
evseDistRange_Miles = config['evseDistRange_Miles']
evseLatRange = config['evseLatRange']
evseLonRange = config['evseLonRange']
hdrErrorLogCSV = config['hdrErrorLogCSV']
hdrLocationInfoCSV = config['hdrLocationInfoCSV']
hdrHomeInfoCSV = config['hdrHomeInfoCSV']
colLocationInfo = config['colLocationInfo']
colHomeInfo = config['colHomeInfo']
verbose = 0
stopProcessing = False
errFilePath = Path(errorLogFileName)
if not errFilePath.exists():
# ErroLog output file
hdr = pd.DataFrame(hdrErrorLogCSV)
hdr.to_csv(errorLogFileName, index=False, header=False, mode='w')
# use one line buffering - every line written is flushed to disk
errorFile = open(errorLogFileName, mode='a', buffering=1, newline='')
errorWriter = csv.writer(errorFile)
def on_press(key):
if hasattr(key, 'char'):
if key.char == 'v':
cfg.verbose = (cfg.verbose + 1) % 3 # verbosity levels: 0, 1, 2
print('Verbosity: {}'.format(cfg.verbose))
if key.char == 'q':
cfg.stopProcessing = not cfg.stopProcessing
if cfg.stopProcessing:
print("Processing will stop after current vehicle.")
else:
print("Stop canceled, processing will continue.")
def main():
listener = pynput.keyboard.Listener(on_press=on_press, suppress=True)
listener.start()
# for vehicle proceesing rate
vst = datetime.datetime.now()
# trust chained assignments (no warnings)
pd.set_option('mode.chained_assignment', None)
# LocationInfo output file
locationFilePath = Path(cfg.locationInfoFileName)
if not locationFilePath.exists():
hdr = | pd.DataFrame(cfg.hdrLocationInfoCSV) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interprested as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-04 10:00')])
self.assert_series_equal(expected, result)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT], tz=tz)
s = pd.Series(idx)
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp(
'2011-01-02 10:00', tz=tz).to_pydatetime())
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
'2011-01-03 10:00', '2011-01-02 10:00'],
tz=tz)
expected = Series(idx)
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp(
'2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00',
tz=tz)])
self.assert_series_equal(expected, result)
# filling with a naive/other zone, coerce to object
result = s.fillna(Timestamp('20130101'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp(
'2013-01-01'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp(
'2013-01-01')])
self.assert_series_equal(expected, result)
result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific'),
Timestamp('2011-01-03 10:00', tz=tz),
Timestamp('2013-01-01', tz='US/Pacific')])
self.assert_series_equal(expected, result)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_isnull_for_inf(self):
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_series_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
exp = Series([0., 1., 1., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='ffill'), exp)
exp = Series([0., 1., 3., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(method='backfill'), exp)
exp = Series([0., 1., 5., 3., 4.], index=ts.index)
self.assert_series_equal(ts.fillna(value=5), exp)
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result, expected)
result = s1.fillna({})
assert_series_equal(result, s1)
result = s1.fillna(Series(()))
assert_series_equal(result, s1)
result = s2.fillna(s1)
assert_series_equal(result, s2)
result = s1.fillna({0: 1})
assert_series_equal(result, expected)
result = s1.fillna({1: 1})
assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
assert_series_equal(result, s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0, 0, 2.], list('bac'))
assert_series_equal(result, expected)
# limit
s = Series(np.nan, index=[0, 1, 2])
result = s.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
result = s.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
assert_series_equal(result, expected)
# GH 9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ['0', '1.5', '-0.3']
for val in vals:
s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
result = s.fillna(val)
expected = Series([0, 1, val, val, 4], dtype='object')
assert_series_equal(result, expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# self.assertEqual(isnull(result).sum(), 7)
# NumPy limitiation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-03 10:00')], index=[0, 2])
self.assert_series_equal(result, expected)
# DatetimeBlockTZ
idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
'2011-01-03 10:00', pd.NaT],
tz='Asia/Tokyo')
s = pd.Series(idx)
self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
result = s.dropna()
expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
index=[0, 2])
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
for s in [Series([1, 2, 3], name='x'), Series(
[False, True, False], name='x')]:
result = s.dropna()
self.assert_series_equal(result, s)
self.assertFalse(result is s)
s2 = s.copy()
s2.dropna(inplace=True)
self.assert_series_equal(s2, s)
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_series_equal(result, ts[1::2])
tm.assert_series_equal(result, ts[pd.notnull(ts)])
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.isnull(),
Series([False, False, False, True, False]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.isnull(), Series([False, False, True]).values)
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(ser.notnull(),
Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_fill_value_when_combine_const(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
assert_series_equal(res, exp)
class TestSeriesInterpolateData(TestData, tm.TestCase):
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_series_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_series_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interpolate_pchip(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_akima(self):
tm._skip_if_no_scipy()
_skip_if_no_akima()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(method='akima')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_piecewise_polynomial(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='piecewise_polynomial')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_from_derivatives(self):
tm._skip_if_no_scipy()
ser = Series([10, 11, 12, 13])
expected = Series([11.00, 11.25, 11.50, 11.75,
12.00, 12.25, 12.50, 12.75, 13.00],
index=Index([1.0, 1.25, 1.5, 1.75,
2.0, 2.25, 2.5, 2.75, 3.0]))
# interpolate at new_index
new_index = ser.index.union(Index([1.25, 1.5, 1.75, 2.25, 2.5, 2.75]))
interp_s = ser.reindex(new_index).interpolate(
method='from_derivatives')
assert_series_equal(interp_s[1:3], expected)
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = | Series([1, 3, np.nan, np.nan, np.nan, 11]) | pandas.Series |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The trainer program for ESR_EA."""
import os
import math
import torch
import logging
import numpy as np
import pandas as pd
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.search_space import SearchSpace
from vega.search_space.codec import Codec
from vega.search_space.networks import NetworkDesc
from vega.core.common import FileOps, Config
from .esr_ea_individual import ESRIndividual
from vega.core.trainer.callbacks import Callback
@ClassFactory.register(ClassType.CALLBACK)
class ESRTrainerCallback(Callback):
"""Construct the trainer of ESR-EA."""
def before_train(self, epoch, logs=None):
"""Be called before the training process."""
self.cfg = self.trainer.cfg
# Use own save checkpoint and save performance function
self.trainer.auto_save_ckpt = False
self.trainer.auto_save_perf = False
# This part is tricky and
model = ClassFactory.__configs__.get('model', None)
if model:
self.model_desc = model.get("model_desc", None)
if self.model_desc is not None:
model = self._init_model()
self.trainer.build(model=model)
def make_batch(self, batch):
"""Make batch for each training step."""
input = batch["LR"]
target = batch["HR"]
if self.cfg.cuda:
input = input.cuda()
target = target.cuda()
return input, target
def after_epoch(self, epoch, logs=None):
"""Be called after one epoch training."""
# Get summary perfs from logs from built-in MetricsEvaluator callback.
self.performance = logs.get('summary_perfs', None)
best_valid_perfs = self.performance['best_valid_perfs']
best_valid = list(best_valid_perfs.values())[0]
best_changed = self.performance['best_valid_perfs_changed']
if best_changed:
self._save_checkpoint({"Best PSNR": best_valid, "Epoch": epoch})
def after_train(self, logs=None):
"""Be called after the whole train process."""
# Extract performance logs. This can be moved into builtin callback
# if we can unify the performace content
best_valid_perfs = self.performance['best_valid_perfs']
best_valid = list(best_valid_perfs.values())[0]
self._save_performance(best_valid)
def _save_checkpoint(self, performance=None, model_name="best.pth"):
"""Save the trained model.
:param performance: dict of all the result needed
:type performance: dictionary
:param model_name: name of the result file
:type model_name: string
:return: the path of the saved file
:rtype: string
"""
local_worker_path = self.trainer.get_local_worker_path()
model_save_path = os.path.join(local_worker_path, model_name)
torch.save({
'model_state_dict': self.trainer.model.state_dict(),
**performance
}, model_save_path)
torch.save(self.trainer.model.state_dict(), model_save_path)
logging.info("model saved to {}".format(model_save_path))
return model_save_path
def _save_performance(self, performance, model_desc=None):
"""Save result of the model, and calculate pareto front.
:param performance: The dict that contains all the result needed
:type performance: dictionary
:param model_desc: config of the model
:type model_desc: dictionary
"""
self.trainer._save_performance(performance)
# FileOps.copy_file(self.performance_file, self.best_model_pfm)
pd_path = os.path.join(self.trainer.local_output_path, 'population_fitness.csv')
df = | pd.DataFrame([[performance]], columns=["PSNR"]) | pandas.DataFrame |
__title__ = "playground"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "<EMAIL>"
from queue import Queue
import threading
import time
import pandas as pd
from datetime import datetime as dt
from typing import Any, Dict, List, Callable, Optional
from playground import settings as s
from playground.messaging import Message, Stream, Payload
from playground.analysis import Analysis
from playground.cryptocompare import CryptoCompareAPI
from playground.models.pair import MarketPair
from playground.util import (
setup_logger,
timestamp_to_date,
)
from playground.util_ops import (
get_cc_callable_by_def,
get_delta_callable_for_tf,
get_cc_callable_by_time,
)
class Warehouse:
"""
A csv-based warehousing absolute unit.
"""
# Operating Logic
ready: bool = False
updated: bool = False
analysed: bool = False
# Assets on which to operate and check for missing and outdated datasets on startup
market_pairs: List = None
missing_sets: List = None
missing_analysed_sets: List = None
outdated_sets: List = None
# Logging attributes
_verbose: bool = None
_extra_verbose: bool = None
# Throttle attributes due to warehouse cycle and API limits etc
__throttle: int = 2
# Safety
___read_lock: threading.RLock = None
___update_lock: threading.RLock = None
def __init__(self) -> None:
"""
Initialize the Warehouse object with the settings.
"""
self.logger = setup_logger(name=__name__)
self.logger.info('Initializing %s module.', __name__)
self.___read_lock = threading.RLock()
self.___update_lock = threading.RLock()
self._parse_settings()
self._parse_running_pairs()
self.missing_sets = self._check_missing_datasets()
if self.missing_sets:
self._build_missing_datasets()
self.outdated_sets = self._check_outdated_datasets()
if len(self.outdated_sets) == 0:
self.logger.info('Warehouse updated.')
self.set_updated()
else:
self.logger.info('Datasets outdated: {}'.format(self.outdated_sets))
self.update_datasets()
if s.FORCE_STARTUP_ANALYSIS:
self.logger.info('Forcing startup analysis.')
self.analyse_datasets()
self.missing_analysed_sets = self._check_missing_datasets(analysed=True)
if len(self.missing_analysed_sets) == 0:
self.logger.info('Warehouse analysed.')
self.set_analysed()
else:
self.logger.info('Datasets outdated: {}'.format(self.outdated_sets))
self.analyse_datasets(datasets=self.missing_analysed_sets)
# Only analyse the ones we detected earlier for less startup time
self.set_ready()
def get_latest_candle(
self, pair: MarketPair = None, timeframe: str = '', analysed: bool = False, closed: bool = False,
) -> pd.DataFrame:
"""
Fetch the last candle from a certain Pair and Timeframe.
Parameters `pair` and `timeframe` are mandatory.
If `analysed` is passed, function will return the latest candle from the dataset post-analysis.
If `closed` is passed, function will return the latest closed candle.
:param `pair`: `MarketPair`
:param `timeframe`: `str`
:param `analysed`: `bool`
:param `closed`: `bool`
"""
dataset_file: str = ''
if analysed:
dataset_file = s.DATASET_FOLDER + '{}_{}_analyzed_v1.csv'.format(pair, timeframe).replace(' ', '')
else:
dataset_file = s.DATASET_FOLDER + '{}_{}.csv'.format(pair, timeframe).replace(' ', '')
try:
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=1)
except Exception as exc:
self.logger.exception(
msg="Warehouse found exception trying to read file. Pair: " + pair + " " + timeframe + "Analysed: " + str(analysed) + "Limit: " + str(limit),
exc_info=exc)
dataset_file = s.DATASET_FOLDER + '{}_{}_analyzed.csv'.format(pair, timeframe).replace(' ', '')
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=s.MAX_ROWS)
if closed:
return _dataset.iloc[1]
return _dataset.iloc[0]
def get_dataset(self, pair: MarketPair = None, timeframe: str = '', analysed: bool = False, limit: int = None) -> pd.DataFrame:
"""
Fetch the dataset up to the maximum number of rows as per config.
Parameters `pair` and `timeframe` are mandatory.
If `analysed` is passed, function will return the latest candle from the dataset post-analysis.
:param `pair`: MarketPair that represents the pair to fetch
:param `timeframe`: str that represents the timeframe, i.e. '5m', '1D'
:param `analysed`: bool that represents if the dataset is analyzed or not
:param `limit`: int that represents the limit of datapoints
"""
dataset_file: str = ''
if analysed:
dataset_file = s.DATASET_FOLDER + '{}_{}_analyzed_v1.csv'.format(pair, timeframe).replace(' ', '')
else:
dataset_file = s.DATASET_FOLDER + '{}_{}.csv'.format(pair, timeframe).replace(' ', '')
_dataset: pd.DataFrame = None
if limit is None:
try:
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=s.MAX_ROWS)
except FileNotFoundError as exc:
self.logger.exception(
msg="Warehouse found exception trying to read file."
" Pair: "+ pair + " " + timeframe + "Analysed: " + str(analysed) + "Limit: " + str(limit),
exc_info=exc)
dataset_file = s.DATASET_FOLDER + '{}_{}_analyzed.csv'.format(pair, timeframe).replace(' ', '')
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=s.MAX_ROWS)
except KeyError as exc:
self.logger.exception(
msg="Warehouse found exception trying to read file."
" Pair: "+ pair + " " + timeframe + "Analysed: " + str(analysed) + "Limit: " + str(limit),
exc_info=exc)
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=s.MAX_ROWS)
else:
try:
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=limit)
except FileNotFoundError as exc:
self.logger.exception(
msg="Warehouse found exception trying to read file."
" Pair: "+ pair + " " + timeframe + "Analysed: " + str(analysed) + "Limit: " + str(limit),
exc_info=exc)
dataset_file = s.DATASET_FOLDER + '{}_{}_analyzed.csv'.format(pair, timeframe).replace(' ', '')
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=limit)
except KeyError as exc:
self.logger.exception(
msg="Warehouse found exception trying to read file."
" Pair: "+ pair + " " + timeframe + "Analysed: " + str(analysed) + "Limit: " + str(limit),
exc_info=exc)
_dataset = self._get_dataset_from_file(filename=dataset_file, rows=limit)
return _dataset
def update_datasets(self) -> None:
"""
Analyse datasets.
"""
if not self.is_updated():
helpers: list = []
for item in self.outdated_sets:
helper = threading.Thread(target=self._update_dataset, args=[item])
helper.start()
helpers.append(helper)
if self._verbose:
self.logger.info('Updating and analysing dataset for {} {}..'.format(
item.get('pair'), item.get('timeframe').replace(' ', '')
)
)
for helper in helpers:
helper.join()
self.outdated_sets = []
self.set_updated()
def analyse_datasets(self, datasets: list = None) -> None:
"""
Analyse datasets.
If `datasets` is passed, function will not use the market_pairs attribute.
:param `datasets`: `list`
"""
helpers: list = []
self.set_analysing()
if not datasets:
for pair in self.market_pairs:
for tf in s.WAREHOUSE_TIMEFRAMES:
item = {
'pair': pair,
'timeframe': tf
}
helper = threading.Thread(target=Analysis, args=[item])
helper.start()
helpers.append(helper)
if self._verbose:
self.logger.info('Re-analysing dataset for {} {}..'.format(
item.get('pair'), item.get('timeframe').replace(' ', '')
)
)
else:
for pair in datasets:
for tf in s.WAREHOUSE_TIMEFRAMES:
item = {
'pair': pair,
'timeframe': tf
}
helper = threading.Thread(target=Analysis, args=[item])
helper.start()
helpers.append(helper)
if self._verbose:
self.logger.info('Analysing dataset for {} {}..'.format(
item.get('pair'), item.get('timeframe').replace(' ', '')
)
)
for helper in helpers:
helper.join()
self.logger.info('Warehouse analysed..')
if datasets:
self.missing_analysed_sets = []
self.set_analysed()
def update(self) -> None:
"""
This method needs to be called in a loop by a worker.
"""
self.outdated_sets = self._check_outdated_datasets()
if len(self.outdated_sets) == 0:
self.logger.info('Warehouse remains updated.')
self.set_updated()
else:
self.logger.info('Datasets outdated: {}'.format(self.outdated_sets))
self.set_updating()
self.update_datasets()
self.logger.info('Warehouse successfully updated.')
def _update_dataset(self, config: Dict[str, Any]) -> None:
"""
This function is used by the Warehouse to update the disk datasets with the newest data.
The behavior of this method is designed this way because it is intended for both threaded and unthreaded use.
Param `config` is mandatory.
Config follows the structured defined in the settings module.
:param `config`: `dict`
"""
if self._extra_verbose:
self.logger.info('Updating dataset %s %s.', config.get('pair'), config.get('timeframe').replace(' ', ''))
cc_config = {
'comparison_symbol': str(config.get('pair').quote_currency),
'apikey': str(config.get('pair')._api_key),
}
_cc: CryptoCompareAPI = CryptoCompareAPI(config=cc_config, logger=self.logger, verbose=self._extra_verbose)
api_call: Callable = None
api_args: dict = None
candle = self.get_latest_candle(pair=config.get('pair'), timeframe=config.get('timeframe'))
(api_call, api_args) = get_cc_callable_by_time(cc=_cc, config=config, candle=candle)
limit_arg: int = api_args.get('limit', 0)
limit_left: int = -1
if limit_arg > 2000:
api_args['limit'] = 2000
limit_left = limit_arg - 2000
if self._extra_verbose:
self.logger.info('Updating dataset %s %s. Candles: {} candles-left: {} '.format(limit_arg, limit_left), config.get('pair'), config.get('timeframe').replace(' ', ''))
new_data: list = None
initial_data: dict = None
data: dict = None
new_dataset: pd.DataFrame = None
# Fetch our disk dataset so we can update it as we go
self.___read_lock.acquire(blocking=True)
dataset_file = s.DATASET_FOLDER + '{}_{}.csv'.format(
config.get('pair'), config.get('timeframe')).replace(' ', '')
disk_dataset = self._get_dataset_from_file(filename=dataset_file,)
disk_dataset = disk_dataset.set_index('time')
self.___read_lock.release()
try:
data = api_call(**api_args)
except Exception as exc:
self.logger.exception('Warehouse found exception: Connection with API is unstable. :: %s', config, exc_info=exc)
time.sleep(self.__throttle)
"""
Fetch new data in a loop, if the key 'limit' in the `api_args` dict is lesser than 2000 it will break the loop
"""
while True:
if limit_left == 0:
break
# Limit is lesser than 2000, break the loop and update dataset
if limit_arg < 2000:
new_data: list = data.get('Data', None)
if new_data:
new_dataset: pd.DataFrame = pd.DataFrame(new_data)
new_dataset['datetime'] = [dt.fromtimestamp(d) for d in new_dataset.time]
new_dataset['timestamp'] = [d for d in new_dataset.time]
new_dataset = new_dataset.set_index('time')
break
if data is not None:
initial_data: list = data.get('Data', None)
new_dataset: pd.DataFrame = pd.DataFrame(initial_data).set_index('time')
if self._extra_verbose:
self.logger.info('Dataset Length: ' + str(len(new_dataset)))
while limit_left > 0:
if limit_left > 2000:
api_args = {
'symbol': str(config.get('pair').base_currency),
'aggregate': int(config.get('timeframe').split(' ')[0]),
'limit': 2000,
'timestamp': data.get('TimeFrom', None),
}
limit_left -= 2000
else:
api_args = {
'symbol': str(config.get('pair').base_currency),
'aggregate': int(config.get('timeframe').split(' ')[0]),
'limit': limit_left,
'timestamp': data.get('TimeFrom', None),
}
limit_left = 0
if self._extra_verbose:
self.logger.info('Updating dataset %s %s. Candles: {} candles-left: {} '.format(limit_arg, limit_left), config.get('pair'), config.get('timeframe').replace(' ', ''))
try:
data = api_call(**api_args)
except Exception as exc:
self.logger.exception('Warehouse found exception: Connection with API is unstable. :: %s', config, exc_info=exc)
time.sleep(self.__throttle)
continue
if data:
self.logger.info('Fetched dataset for %s - %s from ' + \
str(timestamp_to_date(data.get('TimeFrom', None)).date()) + ' to ' + \
str(timestamp_to_date(data.get('TimeTo', None)).date()), config['pair'], config['timeframe']
)
new_data: list = data.get('Data', None)
if new_data is not None and len(new_data) != 0:
if new_data[0]['high'] == 0 and new_data[0]['open'] == 0 and \
new_data[0]['low'] == 0 and new_data[0]['close'] == 0 and new_data[0]['volumeto'] == 0:
break
df: pd.DataFrame = pd.DataFrame(new_data).set_index('time')
new_dataset = new_dataset.append(df, sort=False)
if self._extra_verbose:
self.logger.info('Dataset Length: ' + str(len(new_dataset)))
else:
break
time.sleep(self.__throttle)
else:
break
new_dataset.sort_index(inplace=True, ascending=False)
new_dataset['datetime'] = [dt.fromtimestamp(d) for d in new_dataset.index]
new_dataset['timestamp'] = [d for d in new_dataset.index]
if self._extra_verbose:
self.logger.info('DataFrame: ' + str(new_dataset.shape))
if new_dataset is not None:
self.___update_lock.acquire(blocking=True)
# Join the data together and overwrite existing timeperiods with newest data
newest_dataset: pd.DataFrame = disk_dataset.copy(deep=True)
newest_dataset = newest_dataset.append(new_dataset, sort=False)
newest_dataset = newest_dataset.drop_duplicates(subset='timestamp', keep='last')
newest_dataset.sort_index(inplace=True, ascending=False)
# Save the new data
newest_dataset.to_csv(dataset_file)
if self._extra_verbose:
self.logger.info('Updated dataset %s %s.', config.get('pair'), config.get('timeframe').replace(' ', ''))
self.___update_lock.release()
else:
self._build_dataset(config=config)
return Analysis(item=config)
def _build_dataset(self, config: Dict[str, Any]) -> pd.DataFrame:
"""
This function is used by the Warehouse to build the missing datasets with data going as far as it can get.
The behavior of this method is designed this way because it is intended for both threaded and unthreaded use.
Param `config` is mandatory.
Config follows the structured defined in the settings module.
:param `config`: `dict`
"""
if self._extra_verbose:
self.logger.info('Building dataset for %s %s.', config['pair'], config['timeframe'])
cc_config = {
'comparison_symbol': str(config.get('pair').quote_currency),
'apikey': str(config.get('pair')._api_key),
}
_cc: CryptoCompareAPI = CryptoCompareAPI(config=cc_config, logger=self.logger, verbose=self._extra_verbose)
initial_data: dict = None
data: dict = None
api_call: Callable = None
api_args: dict = None
api_call, api_args = get_cc_callable_by_def(config=config, cc=_cc)
while True:
try:
data = api_call(**api_args)
except Exception as exc:
self.logger.exception('Warehouse found exception: Connection with API is unstable. :: %s', config, exc_info=exc)
time.sleep(self.__throttle)
continue
if data:
initial_data = data.get('Data', None)
dataset: pd.DataFrame = pd.DataFrame(initial_data).set_index('time')
if self._extra_verbose:
self.logger.info('Dataset Length: ' + str(len(dataset)))
while True:
api_args = {
'symbol': str(config.get('pair').base_currency),
'aggregate': int(config.get('timeframe').split(' ')[0]),
'limit': 2000,
'timestamp': data.get('TimeFrom', None),
}
try:
data = api_call(**api_args)
except Exception as exc:
self.logger.exception('Warehouse found exception: Connection with API is unstable. :: %s', config, exc_info=exc)
time.sleep(self.__throttle)
continue
if data:
self.logger.info('Fetched dataset for %s - %s from ' + \
str(timestamp_to_date(data.get('TimeFrom', None)).date()) + ' to ' + \
str(timestamp_to_date(data.get('TimeTo', None)).date()), config['pair'], config['timeframe']
)
new_data: list = data.get('Data', None)
if new_data is not None and len(new_data) != 0:
if new_data[0]['high'] == 0 and new_data[0]['open'] == 0 and \
new_data[0]['low'] == 0 and new_data[0]['close'] == 0 and new_data[0]['volumeto'] == 0:
break
df: pd.DataFrame = pd.DataFrame(new_data).set_index('time')
dataset = dataset.append(df, sort=False)
if self._extra_verbose:
self.logger.info('Dataset Length: ' + str(len(dataset)))
else:
break
time.sleep(self.__throttle)
else:
break
self.___update_lock.acquire(blocking=True)
dataset.sort_index(inplace=True, ascending=False)
dataset['datetime'] = [dt.fromtimestamp(d) for d in dataset.index]
dataset['timestamp'] = [d for d in dataset.index]
if self._extra_verbose:
self.logger.info('DataFrame: ' + str(dataset.shape))
dataset_file = s.DATASET_FOLDER + '{}_{}.csv'.format(config['pair'], config['timeframe']).replace(' ', '')
dataset.to_csv(dataset_file)
self.___update_lock.release()
break
return Analysis(item=config)
def _build_missing_datasets(self) -> None:
"""
This function is used by the Warehouse to build the missing datasets with data going as far as it can get.
"""
self.logger.info('Building datasets for: %s', self.missing_sets)
helpers: list = []
for item in self.missing_sets:
helper = threading.Thread(target=self._build_dataset, args=[item])
helper.start()
helpers.append(helper)
if self._verbose:
self.logger.info('Building dataset for {} {}..'.format(
item.get('pair'), item.get('timeframe').replace(' ', '')
)
)
for helper in helpers:
helper.join()
self.logger.info('Warehouse ready..')
self.missing_sets = []
return self.set_ready()
def _check_outdated_datasets(self) -> list:
"""
This function is used by the Warehouse to check for outdated datasets.
It will run inside the warehouse loop.
"""
outdated_pair_tf: list = []
for pair in self.market_pairs:
for tf in s.WAREHOUSE_TIMEFRAMES:
candle = self.get_latest_candle(pair=pair, timeframe=tf)
candle_time = dt.fromtimestamp(candle.time)
current_time = dt.now()
if self._extra_verbose:
self.logger.info('{} - Candle Time: {}'.format(str(str(pair)+' '+tf), candle_time))
# rd stands for relativedelta
rd_call: Callable = None
rd_args: dict = None
rd_call, rd_args = get_delta_callable_for_tf(tf=tf)
delta = rd_call(**rd_args)
next_candle = (candle_time + delta)
if self._extra_verbose:
self.logger.info('{} - Next Time: {}'.format(str(str(pair)+' '+tf), next_candle))
if current_time > next_candle:
outdated_pair_tf.append({
'pair': pair,
'timeframe': tf
})
return outdated_pair_tf
def _check_missing_datasets(self, analysed: bool = False) -> list:
"""
Checks for missing datasets.
If `analysed` is passed, function will check for analysed datasets.
:param `analysed`: `bool`
"""
missing_pair_tf: list = []
dataset_file: str = ''
for pair in self.market_pairs:
for tf in s.WAREHOUSE_TIMEFRAMES:
if analysed:
dataset_file = s.DATASET_FOLDER + '{}_{}_analyzed_v1.csv'.format(pair, tf).replace(' ', '')
else:
dataset_file = s.DATASET_FOLDER + '{}_{}.csv'.format(pair, tf).replace(' ', '')
exists = self._dataset_exists(filename=dataset_file)
if exists is not None and not exists:
missing_pair_tf.append({
'pair': pair,
'timeframe': tf
})
if analysed:
self.logger.info('Analysis datasets missing: {}'.format(missing_pair_tf))
else:
self.logger.info('Datasets missing: {}'.format(missing_pair_tf))
return missing_pair_tf
def _parse_settings(self) -> None:
"""
Parse the warehouse's settings, such as logging, storage, etc..
"""
self.logger.info('Parsing module settings.')
self._verbose = s.WAREHOUSE_VERBOSITY
self._extra_verbose = s.WAREHOUSE_EXTRA_VERBOSITY
def _parse_running_pairs(self) -> None:
"""
Parse the warehouse's running pairs and act accordingly.
"""
self.logger.info('Parsing running pairs..')
market_pairs: list = []
for pair in s.MARKET_PAIRS:
market_pair = MarketPair(
config=pair
)
market_pairs.append(market_pair)
self.logger.info('Module found {} pairs: {}'.format(len(market_pairs), market_pairs))
self.market_pairs = market_pairs
def _dataset_exists(self, filename: str = None) -> bool:
"""
Check if the dataset exists.
Parameter `filename` is mandatory.
:param `filename`: `str`
"""
if filename is not None:
dataset: pd.DataFrame = pd.DataFrame()
self.___read_lock.acquire(blocking=True)
try:
dataset = pd.read_csv(filename)
except FileNotFoundError as exc:
self.logger.error("Warehouse found excecption trying to read file. Filename: " + filename)
self.___read_lock.release()
return False
if dataset.empty:
self.___read_lock.release()
return False
else:
self.___read_lock.release()
return True
return None
def _get_dataset_from_file(
self, filename: str = None, rows: int = 0,
) -> Optional[pd.DataFrame]:
"""
Fetch the dataset from a file with `filename`.
Parameter `filename` is mandatory.
If `rows` is passed, function will only read `nrows=rows` from the file, default is 5000 as per settings.
:param `filename`: `str`
:param `rows`: `int`
"""
if filename is not None:
dataset: pd.DataFrame = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import os
import json
import csv
from simple_salesforce import Salesforce # imported salesforce
from config import *
# Login to Salesforce
print("---- logging into Salesforce ----")
sf = Salesforce(username=username, password=password, security_token=token, domain='test')
print("--- login success! ---")
def set_record_type(row):
if row['SerialNo']:
return int(row['SerialNo'])
# TODO : move this to a function later with a SOQL string as parameter
print("--- fetching chunk series details from SF ---")
chunkINFO = sf.bulk.HyperBatchOutput__c.query("SELECT Serial_Number__c,BatchState1__c FROM HyperBatchOutput__c WHERE Serial_Number__c !=null AND ObjectName__c ='Opportunity' Order By Serial_Number__c ASC")
chunkDF = | pd.DataFrame(chunkINFO) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 14:18:10 2017
@author: massimo
Straight import of exiobase data
"""
import pandas as pd
import numpy as np
def importing(filename, celltype):
'''
Args:
'filename' [string] name of the file...
'celltype' [type of file], three values allowed:
'single': text file (maybe generated from a simple Excel file) format:
row 1: text, anything, e.g. "CountryCode_ActivityTypeName"
column 1: text, anything, e.g. "CountryCode_ProductTypeName"
'multi': text file (maybe generated from a complex Excel file) where:
row 2: float, total output per sector
row 4: text, CountryCode
row 5: text, ActivityTypeName
column 1: text, CountryCode
column 2: text, ProductTypeName
column 5: text, UnitCode
'exiobase': text file in exiobase format (used for SUP, USE, FD, emissions, resources):
row 1: text, CountryCode
row 2: text, ActivityTypename
column 1: text, CountryCode (or Comparment)
column 2: text, ProductTypeName (or Substance)
column 3: text, UnitCode
Setting any other value allows importing
exiobase format used for factors and materials, which is:
row 1: text, CountryCode
row 2: text, ActivityTypename
column 2: text, PhysicalTypeName (or FactorInputTypeNamey)
column 3: text, UnitCode
The factor name will be listed in the first index level
'''
if celltype == "single" and filename[-3:] == "txt":
print('Importing singletxt...')
MRtable = pd.read_table(filename, header=0, index_col=0)
MRtable = MRtable.astype(float)
print('Done, this is NOT a multi-index pd.DataFrame object')
elif celltype == "single" and filename[-3:] == "csv":
print('Importing singlecsv...')
MRtable = pd.read_csv(filename, header=0, index_col=0, sep=';')
MRtable = MRtable.astype(float)
print('Done, this is NOT a multi-index pd.DataFrame object')
elif celltype == "multi" and filename[-3:] == "txt":
print('Importing multitxt...')
MRtable = pd.read_table(filename, header=None, dtype = object)
c_cindex = MRtable.iloc[3,5:]
n_cindex = MRtable.iloc[4,5:]
mydata = MRtable.iloc[7:,5:]
c_rindex = MRtable.iloc[7:,0]
n_rindex = MRtable.iloc[7:,1]
u_rindex = MRtable.iloc[7:,4]
mrindex = [np.array(c_rindex),
np.array(n_rindex),
np.array(u_rindex)]
mcindex = [np.array(c_cindex),
np.array(n_cindex)]
exio_format_table = pd.DataFrame(mydata.values, index = mrindex, columns = mcindex, dtype = float)
#exio_format_table.index.names = ['Reg','Prod','Unit']
#exio_format_table.columns.names = ['Reg','Act']
tot_output = MRtable.iloc[1,5:]
supply = pd.DataFrame(data=tot_output.T.values, columns=["Supply"],
index=mcindex, dtype=float).T
#supply.columns.names = ['Reg','Act']
MRtable = {'table': exio_format_table, 'diag': supply }
print('Done, this is a dict of: "table", "diag"')
print('"table" is a multi-index pd.DataFrame object!')
elif celltype == "multi" and filename[-3:] == "csv":
print('Importing multicsv...')
MRtable = pd.read_csv(filename, header=None, sep=';', dtype = object)
c_cindex = MRtable.iloc[3,5:]
n_cindex = MRtable.iloc[4,5:]
mydata = MRtable.iloc[7:,5:]
c_rindex = MRtable.iloc[7:,0]
n_rindex = MRtable.iloc[7:,1]
u_rindex = MRtable.iloc[7:,4]
mrindex = [np.array(c_rindex),
np.array(n_rindex),
np.array(u_rindex)]
mcindex = [np.array(c_cindex),
np.array(n_cindex)]
exio_format_table = pd.DataFrame(mydata.values, index = mrindex, columns = mcindex, dtype = float)
#exio_format_table.index.names = ['Reg','Prod','Unit']
#exio_format_table.columns.names = ['Reg','Act']
tot_output = MRtable.iloc[1,5:]
supply = pd.DataFrame(data=tot_output.T.values, columns=["Supply"],
index=mcindex, dtype=float).T
#supply.columns.names = ['Reg','Act']
MRtable = {'table': exio_format_table, 'diag': supply }
print('Done, this is a dict of: "table", "diag"')
print('"table" is a multi-index pd.DataFrame object!')
elif celltype == 'exiobase' and filename[-3:] == "txt":
print('Importing exiobasetxt file...')
MRtable = pd.read_table(filename, header = [0,1], index_col = [0,1,2], dtype = object)
MRtable = MRtable.astype(float) # didn't work with read_table
#MRtable.index.names = ['Reg','Prod','Unit']
#MRtable.columns.names = ['Reg','Act']
print('Done, this is a multi-index pd.DataFrame object!')
elif celltype == 'exiobase' and filename[-3:] == "csv": # semicolon as separator!
print('Importing exiobasecsv file...')
MRtable = | pd.read_csv(filename, header = [0,1], index_col = [0,1,2], sep = ";", dtype = object) | pandas.read_csv |
"""
Runs ramulator on specified trace files individually in order to gather basic miss/hit information beforehand
-> stats for each run are stored in BASE_STATS_DIR, where a trace file named 'trace_name' is saved as trace_name_stats.txt
Note: Expects trace files to not be in archives
Usage: python get_trace_stats.py [--existing]
"""
import subprocess #for running ramulator in shell
import sys
from os import listdir, makedirs
import argparse
if __name__ == '__main__':
BASE_STATS_DIR = './base_stats_8channel'
TRACE_DIR = './cputraces_unpacked'
INSTR_RECORD = 200000000 #the value of expected_limit_insts TODO: read this from trace file maybe?
TEST_GROUPS = [['libquantum','leslie3d','milc','cactusADM'],
['GemsFDTD','lbm','astar','milc'],
['libquantum', 'leslie3d', 'milc', 'h264ref'],
['libquantum', 'leslie3d', 'GemsFDTD', 'h264ref'],
['wrf', 'gcc', 'lbm', 'libquantum'],
['gcc', 'bzip2', 'astar', 'zeusmp'],
['wrf', 'bzip2', 'gcc', 'astar'],
['wrf', 'bzip2', 'gcc', 'zeusmp'],
['libquantum','leslie3d','milc','cactusADM','GemsFDTD','lbm','astar','zeusmp'],
['libquantum','leslie3d','milc','cactusADM','GemsFDTD','lbm','soplex','xalancbmk'],
['libquantum','leslie3d','milc','cactusADM','wrf','bzip2','gcc','namd'],
['GemsFDTD','lbm','astar','milc','wrf','bzip2','gcc','gobmk']
]
arg_parser = argparse.ArgumentParser(description=None)
arg_parser.add_argument("--existing", action='store_true')
args = arg_parser.parse_args()
try:
makedirs(BASE_STATS_DIR) #make the output directory if it isn't there
except FileExistsError:
print(f"Output directory {BASE_STATS_DIR} already exists, skipping creation")
"""1. Simulate all test files in ramulator"""
trace_procs = []
if(not args.existing): #pass --existing to skip the ramulator simulations and go straight to trace file processing
# Start all the trace simulations
for trace_name in listdir(TRACE_DIR):
command = f"./ramulator configs/DDR3-config.cfg --mode=cpu --stats {BASE_STATS_DIR}/{trace_name}.txt {TRACE_DIR}/{trace_name}"
print(command)
trace_ram_p = subprocess.Popen(command.split(" "))
trace_procs.append(trace_ram_p)
# Wait for all the simulations to finish
for trace_p in trace_procs:
trace_p.wait()
print("All simulations finished, starting processing")
"""2. Process stats in BASE_STATS_DIR, creating Pandas dataframe that is then displayed"""
try:
import pandas as pd
import matplotlib.pyplot as plt
except ImportError: #no pandas, can't do data processing
print("ERROR: No Pandas/matplotlib installation - run 'pip install pandas' and 'pip install matplotlib' if you want to process the trace files")
exit(1)
stat_names = ['ramulator.record_insts_core_0', 'ramulator.record_cycs_core_0', 'ramulator.L3_cache_read_miss', 'ramulator.L3_cache_write_miss', 'ramulator.L3_cache_total_miss'] #stats we are interested in?
trace_stats_files = [file_name for file_name in listdir(BASE_STATS_DIR) if ".txt" in file_name] #don't include .csv file from previous run
trace_stat_names = [trace_stat.replace('.txt', '').split('.')[1] for trace_stat in trace_stats_files]
trace_stat_dicts = [] #list of dictionaries, each one holding stats for a matching trace in trace_stats_files
for trace_stat_f in trace_stats_files:
trace_path = f"{BASE_STATS_DIR}/{trace_stat_f}"
trace_stats_dict = {}
for line in open(trace_path, 'r'):
print(trace_path)
stat_name, stat_val = line.split()[0], float(line.split()[1])
#print(repr(stat_name))
if(stat_name in stat_names): #if it's a stat we are interested in, save it (remove the ramulator part)
field = stat_name.replace('ramulator.', '').replace('record_', '')
trace_stats_dict[field] = stat_val
trace_stat_dicts.append(trace_stats_dict)
trace_stat_df = pd.DataFrame(trace_stat_dicts, index = trace_stat_names)
#trace_stat_df['total_misses'] = trace_stat_df['read_misses'] + trace_stat_df['write_misses']
#trace_stat_df['total_hits'] = trace_stat_df['read_hits'] + trace_stat_df['write_hits']
#trace_stat_df['total_conflicts'] = trace_stat_df['read_conflicts'] + trace_stat_df['write_conflicts']
trace_stat_df['MPKI'] = trace_stat_df['L3_cache_total_miss']/((INSTR_RECORD)/1000)
trace_stat_df['IPC'] = trace_stat_df['insts_core_0']/trace_stat_df['cycs_core_0']
#trace_stat_df['MPKI w/ Conflict'] = (trace_stat_df['total_misses'] + trace_stat_df['total_conflicts']) /((INSTR_RECORD)/1000)
trace_stat_df.index = trace_stat_df.index.rename("app")
print("Individual App Statistics")
print(trace_stat_df.head(200))
#plot output MPKI
trace_stat_df[['MPKI']].sort_index(key = lambda v : v.str.lower()).plot.bar()
plt.xticks(rotation='vertical')
plt.savefig(f"{BASE_STATS_DIR}/MPKI_plot.png", dpi = 400, bbox_inches='tight')
#plot output IPC
trace_stat_df[['IPC']].sort_index(key = lambda v : v.str.lower()).plot.bar()
plt.xticks(rotation='vertical')
plt.savefig(f"{BASE_STATS_DIR}/IPC_plot.png", dpi = 400, bbox_inches='tight')
#save entire dataframe as CSV
out_path = f"{BASE_STATS_DIR}/hit_stats_individual.csv"
print(f"Saving individual app data dataframe as CSV at {out_path}")
trace_stat_df.to_csv(out_path)
trace_stat_groups_strs = [" | ".join(group) for group in TEST_GROUPS]
trace_stat_group_dict = {}
for group_num, group_apps in enumerate(TEST_GROUPS):
group_str = trace_stat_groups_strs[group_num]
trace_stat_group_dict[group_str] = []
for group_app in group_apps:
trace_stat_group_dict[group_str].append(float(trace_stat_df[trace_stat_df.index == group_app]['IPC']))
trace_stat_group_df = | pd.DataFrame.from_dict(trace_stat_group_dict, orient='index') | pandas.DataFrame.from_dict |
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
from datetime import datetime, timedelta
import requests
import json
import time
def read():
df1 = pd.read_csv("CSV/ETH_BTC_USD_2015-08-09_2020-04-04-CoinDesk.csv")
df1.columns = ['date', 'ETH', 'BTC']
df1.date = pd.to_datetime(df1.date, dayfirst=True)
df1.set_index('date', inplace=True)
EOS = pd.read_csv("ICO_coins/EOS_USD_2018-06-06_2020-04-02-CoinDesk.csv")
IOTA = pd.read_csv("ICO_coins/IOTA_USD_2018-06-06_2020-04-02-CoinDesk.csv")
LSK = pd.read_csv("ICO_coins/LSK_USD_2018-06-06_2020-04-02-CoinDesk.csv")
NEO = pd.read_csv("ICO_coins/NEO_USD_2018-06-06_2020-04-02-CoinDesk.csv")
TRX = pd.read_csv("ICO_coins/tron/TRX_USD_2018-06-06_2020-04-02-CoinDesk.csv")
ADA = | pd.read_csv("ICO_coins/cardano/ADA_USD_2018-06-06_2020-04-02-CoinDesk.csv") | pandas.read_csv |
from config.logger import logger
import pandas as pd # must be replaced with internal python tools!
import datetime
from docker import DockerClient
from docker.errors import DockerException, APIError, ContainerError, ImageNotFound
import os
import time
class Operator():
def __init__(self):
try:
self.client = DockerClient(base_url='unix://var/run/docker.sock',timeout=10)
except DockerException as exc:
logger.error(f'Connection with docker.socket aborted {exc}')
raise exc
self.history = None
self.future = None
self.start = None
self.stop = None
self.host_name = None
self.container_id = None
self.service_response = None
self.container_state = None
def _runContainer(self, image, volumes, ports):
'''Run a docker container using a given image; passing keyword arguments
documented to be accepted by docker's client.containers.run function
No extra side effects. Handles and reraises ContainerError, ImageNotFound,
and APIError exceptions.
'''
network = os.getenv('SERVICES_NETWORK', default='service_network')
con_mem_limit = os.getenv('CONTAINER_MEM_LIMIT', default='512m')
ports = {ports:None}
container = None
try:
container = self.client.containers.run(
image,
name=f'point-{self.point}_' + str(int(time.time())),
ports=ports,
volumes=volumes,
detach=True,
mem_limit=con_mem_limit,
cpuset_cpus="1",
network=network
)
if "Name" in container.attrs.keys():
logger.info(f'Container {container.attrs["Name"]} is now running.')
except ContainerError as exc:
logger.error("Failed to run container")
raise exc
except ImageNotFound as exc:
logger.error("Failed to find image to run as a docker container")
raise exc
except APIError as exc:
logger.error("Unhandled error")
raise exc
return container
def _removeContainer(self):
'''
Remove a docker container using a given id; passing keyword arguments
documented to be accepted by docker's client.containers.remove function
No extra side effects. Handles and reraises APIError exceptions.
'''
try:
container = self.client.containers.get(container_id=self.container_id)
container.remove(force=True)
logger.info(f'Container {self.container_id} was removed')
self.container_id = None
except APIError as exc:
logger.error(f'Unhandled APIError error: {exc}')
raise exc
def run_steps(self, data, config, service):
image = config.get('image')
volumes = config.get('volumes')
app_port = config.get('app_port')
host_name = config.get('host_name')
self.point = data.get('metadata').get('point')
self.start = str(datetime.datetime.now())
self.container_id = None
count = 0
containet_state = ''
con_ext_port = None
try:
while True:
# step 1. Create and run container
if self.container_id == None:
self.container = self._runContainer(
image,
volumes,
ports=app_port
)
self.container_id = self.container.short_id
#
elif containet_state.lower() == 'exited':
logger.warning(f'Container {self.container_id} unexpected exited')
break
# step 3. Try to get information about container
elif containet_state.lower() == 'created':
time.sleep(5)
count = count + 1
self.container = self.client.containers.get(container_id=self.container_id)
# step 2. Send data and get prediction
elif containet_state.lower() == 'running':
[cont_port] = self.container.ports.get(app_port)
cont_port = cont_port.get('HostPort')
payload = service.handleRequest(data)
self.service_response = service.call(host_name, cont_port, payload)
break
elif count >= 5:
logger.warning('Max retries exeeded')
break
containet_state = self.container.attrs['State'].get('Status')
self.stop = str(datetime.datetime.now())
result = {
"metadata": {
"containerId": self.container_id,
"point": self.point,
"start_time": self.start,
"finish_time": self.stop
},
'prediction': self.service_response
}
except (APIError, DockerException) as exc:
logger.error(f'Error create docker: {exc}')
raise exc
finally:
# step 4.1 stop and remove a container
self._removeContainer()
return result
class Test(object):
def __init__(self):
self.start = None
self.finish = None
self.config = None
self.type = None
self.point = None
self.response = None
self.dataset = {}
self.time_steps = None
self.time_freq = None
self.regressor = None
self.settings = None
#Only for test.
def job(self, data):
self.config = data['config']
self.dataset = data['data']
# self.regressor = data['regressor']
self.time_steps = self.config['time_steps']
self.time_freq = self.config['time_freq']
self.rolling_window = self.config['rolling']
hist_test_df = pd.DataFrame(self.dataset, columns=['ds','y','x'])
hist_test_df['ds'] = pd.to_datetime(hist_test_df['ds'], unit='ms')
hist_test_df = hist_test_df.set_index(['ds'])
hist_test_df = hist_test_df.astype('float')
last_date = hist_test_df.index[-1:]
time_freq = self.time_freq
[date_year] = last_date.year.tolist()
[date_month] = last_date.month.tolist()
[date_day] = last_date.day.tolist()
[date_hour] = last_date.hour.tolist()
date_time = datetime.datetime(date_year, \
month=date_month, \
day=date_day, \
hour=date_hour, \
minute=0)
dtr = pd.date_range(start=date_time, periods=len(hist_test_df), freq=time_freq)
predict_test_df = pd.DataFrame({'date_time':dtr})
transform = | pd.DataFrame(data=hist_test_df['y'].values, columns=['y']) | pandas.DataFrame |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
import json
from bs4 import BeautifulSoup
import requests
from tqdm import tqdm
def timestamp2date(timestamp):
# function converts a Uniloc timestamp into Gregorian date
return datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d')
def date2timestamp(date):
# function coverts Gregorian date in a given format to timestamp
return datetime.strptime(date, '%Y-%m-%d').timestamp()
def getCryptoOHLC(fsym, tsym):
# function fetches a crypto price-series for fsym/tsym and stores
# it in pandas DataFrame
cols = ['date', 'timestamp', 'open', 'high', 'low', 'close']
lst = ['time', 'open', 'high', 'low', 'close']
timestamp_today = datetime.today().timestamp()
curr_timestamp = timestamp_today
for j in range(2):
df = pd.DataFrame(columns=cols)
# (limit-1) * 2 = days
# One year is around 184
limit = 184
url = ("https://min-api.cryptocompare.com/data/histoday?fsym=" +
fsym + "&tsym=" + tsym + "&toTs=" + str(int(curr_timestamp)) + "&limit=" + str(limit))
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
dic = json.loads(soup.prettify())
for i in range(1, limit):
tmp = []
for e in enumerate(lst):
x = e[0]
y = dic['Data'][i][e[1]]
if(x == 0):
tmp.append(str(timestamp2date(y)))
tmp.append(y)
if(np.sum(tmp[-4::]) > 0):
df.loc[len(df)] = np.array(tmp)
df.index = | pd.to_datetime(df.date) | pandas.to_datetime |
"""
Created on Wed Feb 27 15:12:14 2019
@author: cwhanse
"""
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from datetime import datetime
import pytz
import pytest
from solarforecastarbiter.validation import validator
import pvlib
from pvlib.location import Location
@pytest.fixture
def irradiance_QCRad():
output = pd.DataFrame(
columns=['ghi', 'dhi', 'dni', 'solar_zenith', 'dni_extra',
'ghi_limit_flag', 'dhi_limit_flag', 'dni_limit_flag',
'consistent_components', 'diffuse_ratio_limit'],
data=np.array([[-100, 100, 100, 30, 1370, 0, 1, 1, 0, 0],
[100, -100, 100, 30, 1370, 1, 0, 1, 0, 0],
[100, 100, -100, 30, 1370, 1, 1, 0, 0, 1],
[1000, 100, 900, 0, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 15, 1370, 1, 1, 1, 1, 1],
[1000, 200, 800, 60, 1370, 0, 1, 1, 0, 1],
[1000, 300, 850, 80, 1370, 0, 0, 1, 0, 1],
[1000, 500, 800, 90, 1370, 0, 0, 1, 0, 1],
[500, 100, 1100, 0, 1370, 1, 1, 1, 0, 1],
[1000, 300, 1200, 0, 1370, 1, 1, 1, 0, 1],
[500, 600, 100, 60, 1370, 1, 1, 1, 0, 0],
[500, 600, 400, 80, 1370, 0, 0, 1, 0, 0],
[500, 500, 300, 80, 1370, 0, 0, 1, 1, 1],
[0, 0, 0, 93, 1370, 1, 1, 1, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'float64', 'float64',
'bool', 'bool', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_ghi_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out = validator.check_ghi_limits_QCRad(expected['ghi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(ghi_out, ghi_out_expected)
def test_check_dhi_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
dhi_out_expected = expected['dhi_limit_flag']
dhi_out = validator.check_dhi_limits_QCRad(expected['dhi'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dhi_out, dhi_out_expected)
def test_check_dni_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
dni_out_expected = expected['dni_limit_flag']
dni_out = validator.check_dni_limits_QCRad(expected['dni'],
expected['solar_zenith'],
expected['dni_extra'])
assert_series_equal(dni_out, dni_out_expected)
def test_check_irradiance_limits_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
ghi_out_expected = expected['ghi_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'])
assert_series_equal(ghi_out, ghi_out_expected)
assert dhi_out is None
assert dni_out is None
dhi_out_expected = expected['dhi_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'], ghi=expected['ghi'],
dhi=expected['dhi'])
assert_series_equal(dhi_out, dhi_out_expected)
dni_out_expected = expected['dni_limit_flag']
ghi_out, dhi_out, dni_out = validator.check_irradiance_limits_QCRad(
expected['solar_zenith'], expected['dni_extra'],
dni=expected['dni'])
assert_series_equal(dni_out, dni_out_expected)
def test_check_irradiance_consistency_QCRad(irradiance_QCRad):
expected = irradiance_QCRad
cons_comp, diffuse = validator.check_irradiance_consistency_QCRad(
expected['ghi'], expected['solar_zenith'], expected['dni_extra'],
expected['dhi'], expected['dni'])
assert_series_equal(cons_comp, expected['consistent_components'])
assert_series_equal(diffuse, expected['diffuse_ratio_limit'])
@pytest.fixture
def weather():
output = pd.DataFrame(columns=['air_temperature', 'wind_speed',
'relative_humidity',
'extreme_temp_flag', 'extreme_wind_flag',
'extreme_rh_flag'],
data=np.array([[-40, -5, -5, 0, 0, 0],
[10, 10, 50, 1, 1, 1],
[140, 55, 105, 0, 0, 0]]))
dtypes = ['float64', 'float64', 'float64', 'bool', 'bool', 'bool']
for (col, typ) in zip(output.columns, dtypes):
output[col] = output[col].astype(typ)
return output
def test_check_temperature_limits(weather):
expected = weather
result_expected = expected['extreme_temp_flag']
result = validator.check_temperature_limits(expected['air_temperature'])
assert_series_equal(result, result_expected)
def test_check_wind_limits(weather):
expected = weather
result_expected = expected['extreme_wind_flag']
result = validator.check_wind_limits(expected['wind_speed'])
assert_series_equal(result, result_expected)
def test_check_rh_limits(weather):
expected = weather
data = expected['relative_humidity']
result_expected = expected['extreme_rh_flag']
result = validator.check_rh_limits(data)
result.name = 'extreme_rh_flag'
assert_series_equal(result, result_expected)
def test_check_ac_power_limits():
index = pd.date_range(
start='20200401 0700', freq='2h', periods=6, tz='UTC')
power = pd.Series([0, -0.1, 0.1, 1, 1.1, -0.1], index=index)
day_night = pd.Series([0, 0, 0, 1, 1, 1], index=index, dtype='bool')
capacity = 1.
expected = pd.Series([1, 0, 0, 1, 0, 0], index=index).astype(bool)
out = validator.check_ac_power_limits(power, day_night, capacity)
assert_series_equal(out, expected)
def test_check_dc_power_limits():
index = pd.date_range(
start='20200401 0700', freq='2h', periods=6, tz='UTC')
power = pd.Series([0, -0.1, 0.1, 1, 1.3, -0.1], index=index)
day_night = pd.Series([0, 0, 0, 1, 1, 1], index=index, dtype='bool')
capacity = 1.
expected = pd.Series([1, 0, 0, 1, 0, 0], index=index).astype(bool)
out = validator.check_dc_power_limits(power, day_night, capacity)
assert_series_equal(out, expected)
def test_check_limits():
# testing with input type Series
expected = pd.Series(data=[True, False])
data = pd.Series(data=[3, 2])
result = validator._check_limits(val=data, lb=2.5)
assert_series_equal(expected, result)
result = validator._check_limits(val=data, lb=3, lb_ge=True)
| assert_series_equal(expected, result) | pandas.testing.assert_series_equal |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), | u('value') | pandas.compat.u |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = | Index(vals1, name="x") | pandas.Index |
"""Predict lexical norms, either to evaluate word vectors, or to get norms for unnormed words."""
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.model_selection
import sklearn.preprocessing
import sklearn.utils
import argparse
import os
from .vecs import Vectors
from .utensils import log_timer
import logging
logging.basicConfig(format='[{levelname}] {message}', style='{', level=logging.INFO)
path = os.path.dirname(__file__)
@log_timer
def evaluate_norms(lang, vecs_fname, alpha=1.0):
"""Predict lexical norms to evaluate a set of word vectors in a given language.
Writes scores to tab-separated text file but also returns them.
:param lang: language to evaluate word vectors in (uses two-letter ISO codes)
:param vecs_fname: word vectors to evaluate
:param alpha: regularization strength, default 1.0, set higher for small datasets
:return: pandas DataFrame containing the norms results
"""
norms_path = os.path.join(path, 'datasets', 'norms')
if not os.path.exists('results'):
os.mkdir('results')
results_path = os.path.join('results', 'norms')
if not os.path.exists(results_path):
os.mkdir(results_path)
logging.info(f'evaluating lexical norm prediction with {vecs_fname}')
vectors = Vectors(vecs_fname, normalize=True, n=1e6, d=300)
scores = []
for norms_fname in os.listdir(norms_path):
if norms_fname.startswith(lang):
logging.info(f'predicting norms from {norms_fname}')
norms = pd.read_csv(os.path.join(norms_path, norms_fname), sep='\t', comment='#')
norms = norms.set_index('word')
score = predict_norms(vectors, norms, alpha)['scores']
score['source'] = norms_fname
scores.append(score)
scores_fname = os.path.split(vecs_fname)[1].replace('.vec', '.tsv')
if len(scores) > 0:
scores = pd.concat(scores)
scores.to_csv(os.path.join(results_path, scores_fname), sep='\t', index=False)
return scores
@log_timer
def predict_norms(vectors, norms, alpha=1.0):
"""Predict lexical norms and return score.
:param vectors: Vectors object containing word vectors
:param norms: pandas DataFrame of lexical norms
:param alpha: regularization strength, default 1.0, set higher for small datasets
:return: dict containing scores and predictions in separate pandas DataFrames
"""
vecs_df = vectors.as_df()
cols = norms.columns.values
df = norms.join(vecs_df, how='inner')
# compensate for missing ys somehow
total = len(norms)
missing = len(norms) - len(df)
penalty = (total - missing) / total
logging.info(f'missing vectors for {missing} out of {total} words')
df = sklearn.utils.shuffle(df) # shuffle is important for unbiased results on ordered datasets!
model = sklearn.linear_model.Ridge(alpha=alpha) # use ridge regression models
cv = sklearn.model_selection.RepeatedKFold(n_splits=5, n_repeats=10)
# compute crossvalidated prediction scores
scores = []
for col in cols:
# set dependent variable and calculate 10-fold mean fit/predict scores
df_subset = df.loc[:, vecs_df.columns.values] # use .loc[] so copy is created and no setting with copy warning is issued
df_subset[col] = df[col]
df_subset = df_subset.dropna() # drop NaNs for this specific y
x = df_subset[vecs_df.columns.values]
y = df_subset[col]
cv_scores = sklearn.model_selection.cross_val_score(model, x, y, cv=cv)
median_score = np.median(cv_scores)
penalized_score = median_score * penalty
scores.append({
'norm': col,
'adjusted r': np.sqrt(penalized_score), # take square root of explained variance to get Pearson r
'adjusted r-squared': penalized_score,
'r-squared': median_score,
'r': np.sqrt(median_score),
})
# predict (extend norms)
x_full = df[vecs_df.columns.values]
predictions = df.loc[:, cols] # use .loc[] so copy is created and no setting with copy warning is raised by pandas
for col in cols:
# set dependent variable and fit, but predict for whole x (so including unobserved y)
df_subset = df.loc[:, vecs_df.columns.values] # use .loc[] so copy is created and no setting with copy warning is raised
df_subset[col] = df[col]
df_subset = df_subset.dropna() # drop NaNs for this specific y
x = df_subset[vecs_df.columns.values]
y = df_subset[col]
model.fit(x, y)
predictions[f'{col} predicted'] = model.predict(x_full)
return {'scores': | pd.DataFrame(scores) | pandas.DataFrame |
# @name: metadata.py
# @summary: pulls metadata from an FCS experiment
# @description:
# @sources:
# @depends:
# @author: <NAME>
# @email: <EMAIL>
# @license: Apache-2.0
# @date: 23 April 2018
# [Import dependencies] ---------------------------------------------------------------------------------
# --- data manipulation ---
import pandas as pd
import re # regular expression / string matching
# --- file manipulation ---
import os
import shutil
import zipfile
# --- flow cytometry ---
import FlowCytometryTools
from FlowCytometryTools import FCMeasurement
# [] Outermost functions ---------------------------------------------------------------------------------
# What should be called after reading
# Loops over a row in the plate lookup table to rename the files and pull the metadata
def getmd_renamefiles(plates, expt_dict, fcsfile, platefile, fluorfile):
for expt_id, expt_dirs in expt_dict.items():
print(expt_id)
# only grab the portion of the plate lookup table containing current expt
filtered_plates = plates[plates.expt_id == expt_id]
getmd_renamefiles_1expt(fcsfile, filtered_plates, expt_id, expt_dirs)
# copy the plate layout and fluorescence data to the metadata and input dirs
copy_expt_data(expt_dirs, expt_id, platefile, fluorfile)
def copy_expt_data(expt_dirs, expt_id, platefile, fluorfile):
new_plate = f'{expt_dirs["metadir"]}/{expt_dirs["type"]}-{expt_id}_PlateLayout.xlsx'
new_fluor = f'{expt_dirs["inputdir"]}/{expt_dirs["type"]}-{expt_id}_FlowJoExport.xlsx'
shutil.copyfile(platefile, new_plate)
shutil.copyfile(fluorfile, new_fluor)
def getmd_renamefiles_1expt(fcsfile, plates, expt_id, expt_dirs):
# metadata holder
md = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import ast
import sys
import os.path
from pandas.core.algorithms import isin
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0)
commerces = commerces.drop_duplicates("commerce_name")
i = 0
for _, row in commerces.iterrows():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_company",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
members = load_member_summaries(concat_uk_sector=False)
members = members[cols_of_interest]
members = members.drop_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~pd.isnull(members["tenancies"])]
members["about_company"] = members["about_company"].map(remove_html_tags, na_action="ignore")
members = members.sort_values("member_name")
i = 0
for _, row in members.iterrows():
member_name = row["member_name"]
if pd.isnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not pd.isnull(row[k]) and k in {"sectors", "buys", "sells"}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"UK_sectors",
"UK_divisions",
"UK_groups",
"UK_classes",
"sic_codes",
"directors",
}
else cast_to_float(row[k]) if k in {"Cash_figure","NetWorth_figure","TotalCurrentAssets_figure","TotalCurrentLiabilities_figure"}
else row[k] if not pd.isnull(row[k])
else None)
for k in cols_of_interest
},
}
if not pd.isnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
if pd.isnull(director["director_name"]):
continue
if not pd.isnull(director["director_date_of_birth"]):
director["director_date_of_birth"] = insert_space(director["director_date_of_birth"], 3)
directors.append(director)
else:
directors = []
document["directors"] = directors
assert not pd.isnull(row["tenancies"])
tenancies = []
regions = []
for tenancy in row["tenancies"].split(separator):
tenancies.append(tenancy)
if tenancy == "Made in the Midlands":
regions.append("midlands")
else:
assert tenancy == "Made in Yorkshire", tenancy
regions.append("yorkshire")
document["tenancies"] = tenancies
document["regions"] = regions
for award in ("badge", "accreditation"):
award_name = f"{award}s"
if not pd.isnull(row[award_name]):
awards = []
for a in row[award_name].split(separator):
awards.append(a)
document[award_name] = awards
insert_document(db, collection, document)
i += 1
def add_SIC_hierarchy_to_members(db=None):
'''
USE SIC CODES TO MAP TO SECTOR USING FILE:
data/class_to_sector.json
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
get_sic_codes_query = f'''
FOR m IN Members
FILTER m.sic_codes != NULL
RETURN {{
_key: m._key,
sic_codes: m.sic_codes,
}}
'''
members = aql_query(db, get_sic_codes_query)
class_to_sector_map = read_json("data/class_to_sector.json")
for member in members:
sic_codes = member["sic_codes"]
sic_codes = [sic_code.split(" - ")[1]
for sic_code in sic_codes]
classes = set()
groups = set()
divisions = set()
sectors = set()
for sic_code in sic_codes:
if sic_code not in class_to_sector_map:
continue
classes.add(sic_code)
groups.add(class_to_sector_map[sic_code]["group"])
divisions.add(class_to_sector_map[sic_code]["division"])
sectors.add(class_to_sector_map[sic_code]["sector"])
document = {
"_key" : member["_key"],
"UK_classes": sorted(classes),
"UK_groups": sorted(groups),
"UK_divisions": sorted(divisions),
"UK_sectors": sorted(sectors),
}
insert_document(db, collection, document, verbose=True)
def populate_users(
data_dir="data_for_graph",
cols_of_interest=[
"id",
"full_name",
"email",
"company_name",
"company_position",
"company_role",
],
db=None):
'''
CREATE AND ADD USER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Users", db, )
user_filename = f"{data_dir}/all_users.csv"
users = pd.read_csv(user_filename, index_col=0)
users["company_role"] = users.apply(
infer_role,
axis=1
)
i = 0
for _, row in users.iterrows():
user_name = row["full_name"]
if pd.isnull(user_name):
continue
document = {
"_key" : str(i),
"name": user_name,
**{
k: (row[k] if not pd.isnull(row[k]) else None)
for k in cols_of_interest
}
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_works_at(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("UserWorksAt", db, className="Edges")
user_filename = f"{data_dir}/all_users.csv"
users = pd.read_csv(user_filename, index_col=0)
users["company_role"] = users.apply(
infer_role,
axis=1
)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.iterrows():
user_id = row["id"]
company_id = row["company_id"]
if user_id not in user_name_to_id:
continue
if company_id not in member_name_to_id:
continue
document = {
"_key" : str(i),
"name": "works_at",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[company_id],
"company_position": row["company_position"]
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_follows(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
user_follows_collection = connect_to_collection("UserFollows", db, className="Edges")
user_follows_members_collection = connect_to_collection("MemberMemberFollows", db, className="Edges")
user_follows_filename = os.path.join(data_dir, "all_user_follows.csv")
users = pd.read_csv(user_follows_filename, index_col=0)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.iterrows():
user_id = row["id"]
if user_id not in user_name_to_id:
continue
user_name = row["full_name"]
employer_id = row["employer_id"]
followed_member_id = row["followed_member_id"]
if followed_member_id not in member_name_to_id:
continue
# user -> member
document = {
"_key" : str(i),
"name": "follows",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[followed_member_id]
}
print ("inserting data", document)
insert_document(db, user_follows_collection, document)
# member -> member
if employer_id in member_name_to_id:
document = {
"_key" : str(i),
"name": "follows",
"_from": member_name_to_id[employer_id],
"_to": member_name_to_id[followed_member_id],
"followed_by": user_name,
}
print ("inserting data", document)
insert_document(db, user_follows_members_collection, document)
i += 1
def populate_member_sectors(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("InSector", db, className="Edges")
members = load_member_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
sector_name_to_id = name_to_id(db, "Sectors", "sector_name")
for _, row in members.iterrows():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
sectors = row["sectors"]
if pd.isnull(sectors):
continue
sectors = sectors.split(separator)
for sector in sectors:
document = {
"_key" : str(i),
"name": "in_sector",
"_from": member_name_to_id[member_id],
"_to": sector_name_to_id[sector],
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_member_commerces(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("PerformsCommerce", db, className="Edges")
members = load_member_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
commerce_name_to_id = name_to_id(db, "Commerces", "commerce")
for _, row in members.iterrows():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
for commerce_type in ("buys", "sells"):
commerce = row[commerce_type]
if not pd.isnull(commerce):
commerce = commerce.split(separator)
for c in commerce:
if c=="":
assert False
continue
document = {
"_key" : str(i),
"name": commerce_type,
"_from": member_name_to_id[member_id],
"_to": commerce_name_to_id[c],
"commerce_type": commerce_type
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_messages(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Messages", db, className="Edges")
message_filename = os.path.join(data_dir, "all_messages.csv")
messages = pd.read_csv(message_filename, index_col=0)
messages = messages.drop_duplicates()
i = 0
user_name_to_id = name_to_id(db, "Users", "id")
for _, row in messages.iterrows():
sender_id = row["sender_id"]
if sender_id not in user_name_to_id:
continue
subject = row["subject"]
message = row["message"]
message = remove_html_tags(message)
timestamp = str(row["created_at"])
# TODO characterise messages
# recipients = json.loads(row["all_recipients"])
# for recipient in recipients:
# receiver = recipient["name"]
receiver_id = row["recipient_id"]
# receiver_member = row["recipient_member_name"]
if receiver_id not in user_name_to_id:
continue
if sender_id == receiver_id:
continue
document = {
"_key": str(i),
"name": "messages",
"_from": user_name_to_id[sender_id],
"_to": user_name_to_id[receiver_id],
"subject": subject,
"message": message,
"sent_at": convert_to_iso8601(timestamp),
}
insert_document(db, collection, document)
i += 1
def populate_member_member_business(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("MemberMemberBusiness", db, className="Edges")
member_name_to_id = name_to_id(db, "Members", "member_name")
i = 0
# articles
for region in ("yorkshire", "midlands"):
filename = os.path.join("members", f"member_member_partnerships - {region}_matched.csv")
member_member_business = pd.read_csv(filename, index_col=None)
for _, row in member_member_business.iterrows():
member_1 = row["member_1_best_matching_member"]
member_2 = row["member_2_best_matching_member"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
article_title = row["article_title"]
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_article"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
# "_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "article",
"article_title": article_title,
"region": region
}
insert_document(db, collection, document)
i += 1
# survey connections
connections_filename="survey/final_processed_connections.csv"
survey_connections = | pd.read_csv(connections_filename, index_col=0) | pandas.read_csv |
"""
This module contains functions related to ML-matcher, that is common across
all the ML-matchers.
"""
import logging
# import numpy as np
import pandas as pd
# import dask
import dask
from dask import delayed
from dask.diagnostics import ProgressBar
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.matcher.matcher import Matcher
from py_entitymatching.matcher.matcherutils import get_true_lbl_index
import py_entitymatching.utils.catalog_helper as ch
import py_entitymatching.utils.generic_helper as gh
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
class DaskMLMatcher(Matcher):
"""
ML Matcher class.
"""
def _fit_sklearn(self, x, y, check_rem=True):
"""
This function mimics fit method supported by sk-learn.
"""
# From the given input, derive the data that can be used for sk-learn
# methods.
x, y = self._get_data_for_sklearn(x, y, check_rem=check_rem)
# Call the fit method from the underlying classifier.
self.clf.fit(x, y)
return True
def _fit_ex_attrs(self, table, exclude_attrs, target_attr):
"""
This function supports the fit method, where the DataFrame can be
given as input along with what attributes must be excluded and the
target attribute.
"""
# Validate the input parameters.
# # We expect the input table to be of type pandas DataFrame.
if not isinstance(table, pd.DataFrame):
logger.error('Input table is not of type DataFrame')
raise AssertionError('Input table is not of type DataFrame')
# Convert the exclude attributes into list (if the input is not of list)
if not isinstance(exclude_attrs, list):
exclude_attrs = [exclude_attrs]
# Check if the exclude attributes are present in the input table. If
# not, raise an error.
if not ch.check_attrs_present(table, exclude_attrs):
logger.error(
'The attributes mentioned in exclude_attrs is not present ' \
'in the input table')
raise AssertionError(
'The attributes mentioned in exclude_attrs is not present ' \
'in the input table')
# Check if the target attribute is present in the input table. If
# not, raise an error.
if not ch.check_attrs_present(table, target_attr):
logger.error('The target_attr is not present in the input table')
raise AssertionError(
'The target_attr is not present in the input table')
# We now remove duplicate attributes from the exclude_attrs
exclude_attrs = gh.list_drop_duplicates(exclude_attrs)
# We explicitly append target attribute to exclude attributes
if target_attr not in exclude_attrs:
exclude_attrs.append(target_attr)
# Now, we get the attributes to project
attributes_to_project = gh.list_diff(list(table.columns), exclude_attrs)
# Get the predictors and the target attribute from the input table
# based on the exclude attrs and the target attribute.
x = table[attributes_to_project]
y = table[target_attr]
self._fit_sklearn(x, y, check_rem=False)
def fit(self, x=None, y=None, table=None, exclude_attrs=None,
target_attr=None):
"""
Fit interface for the matcher.
Specifically, there are two ways the user can call the fit method.
First, interface similar to scikit-learn where the feature vectors
and target attribute given as projected DataFrame.
Second, give the DataFrame and explicitly specify the feature vectors
(by specifying the attributes to be excluded) and the target attribute.
A point to note is all the input parameters have a default value of
None. This is done to support both the interfaces in a single function.
Args:
x (DataFrame): The input feature vectors given as pandas
DataFrame (defaults to None).
y (DatFrame): The input target attribute given as pandas
DataFrame with a single column (defaults to None).
table (DataFrame): The input pandas DataFrame containing feature
vectors and target attribute (defaults to None).
exclude_attrs (list): The list of attributes that should be
excluded from the input table to get the feature vectors.
target_attr (string): The target attribute in the input table.
"""
# Check if x and y is given, then call a function that handles
# sk-learn like interface input.
if x is not None and y is not None:
self._fit_sklearn(x, y)
# Check if table and its associated attributes, then call the
# appropriate function that handles it.
elif (
table is not None and exclude_attrs is not None) \
and target_attr is not None:
self._fit_ex_attrs(table, exclude_attrs, target_attr)
else:
# If the syntax is not what we expect, raise an syntax error.
raise SyntaxError(
'The arguments supplied does not match the signatures '
'supported !!!')
def _predict_sklearn(self, x, check_rem=True, return_prob=False):
# Function that implements, predict interface mimic-ing sk-learn's
# predict interface.
# Here check_rem parameter requires a bit of explanation. The
# check_rem flag checks if the input table has '_id' attribute if so
# and if check_rem is True then we remove the '_id' attribute from
# the table.
# Note: Here check_rem is just passing what is coming in i.e it can be
# true or false based up on who is calling it.
x = self._get_data_for_sklearn(x, check_rem=check_rem)
# Call the underlying predict function.
y = self.clf.predict(x)
if not return_prob:
# Return the predictions
return y
else:
_p = self.clf.predict_proba(x)
true_index = get_true_lbl_index(self.clf)
return y, _p[:, true_index]
def _predict_ex_attrs(self, table, exclude_attrs, return_prob=False):
"""
Variant of predict method, where data is derived based on exclude
attributes.
"""
# Validate input parameters
# # We expect input table to be a pandas DataFrame.
if not isinstance(table, pd.DataFrame):
logger.error('Input table is not of type DataFrame')
raise AssertionError('Input table is not of type DataFrame')
# # We expect the exclude attributes to be a list, if not convert it
# into a list.
if not isinstance(exclude_attrs, list):
exclude_attrs = [exclude_attrs]
# Check if the input table contains the attributes to be excluded. If
# not raise an error.
if not ch.check_attrs_present(table, exclude_attrs):
logger.error(
'The attributes mentioned in exclude_attrs is not present ' \
'in the input table')
raise AssertionError(
'The attributes mentioned in exclude_attrs is not present ' \
'in the input table')
# Get the attributes to project.
attributes_to_project = gh.list_diff(list(table.columns), exclude_attrs)
# Get feature vectors and the target attribute
x = table[attributes_to_project]
# Do the predictions and return the probabilities (if required)
res = self._predict_sklearn(x, check_rem=False, return_prob=return_prob)
return res
# if not just do the predictions and return the result
# if not return_prob:
# # Do the predictions using the ML-based matcher.
# y = self._predict_sklearn(x, check_rem=False)
#
# # Finally return the predictions
# return y
# else:
# res = self._predict_sklearn()
def predict(self, x=None, table=None, exclude_attrs=None, target_attr=None,
append=False, return_probs=False, probs_attr=None, inplace=True,
show_progress=False, n_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Predict interface for the matcher.
Specifically, there are two ways the user can call the predict method.
First, interface similar to scikit-learn where the feature vectors
given as projected DataFrame.
Second, give the DataFrame and explicitly specify the feature vectors
(by specifying the attributes to be excluded) .
A point to note is all the input parameters have a default value of
None. This is done to support both the interfaces in a single function.
Currently, the Dask implementation supports only the cases when the table is not
None and the flags inplace, append are False.
Args:
x (DataFrame): The input pandas DataFrame containing only feature
vectors (defaults to None).
table (DataFrame): The input pandas DataFrame containing feature
vectors, and may be other attributes (defaults to None).
exclude_attrs (list): A list of attributes to be excluded from the
input table to get the feature vectors (defaults to None).
target_attr (string): The attribute name where the predictions
need to be stored in the input table (defaults to None).
probs_attr (string): The attribute name where the prediction probabilities
need to be stored in the input table (defaults to None).
append (boolean): A flag to indicate whether the predictions need
to be appended in the input DataFrame (defaults to False).
return_probs (boolean): A flag to indicate where the prediction probabilities
need to be returned (defaults to False). If set to True, returns the
probability if the pair was a match.
inplace (boolean): A flag to indicate whether the append needs to be
done inplace (defaults to True).
show_progress (boolean): A flag to indicate whether the progress of
extracting feature vectors must be displayed (defaults to True).
n_chunks (int): The number of partitions to split the candidate set. If it
is set to -1, the number of partitions will be set to the
number of cores in the machine.
Returns:
An array of predictions or a DataFrame with predictions updated.
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
if x is not None:
return self._predict(x, table, exclude_attrs, target_attr, append,
return_probs, probs_attr, inplace)
else:
n_chunks = get_num_partitions(n_chunks, len(table))
if n_chunks == 1 or inplace == True or append == False:
# When the inplace flag is True, the predictions (and probs) are added
# in place. If he have to use Dask then we have to modify _predict (
# specifically _predict_sk_learn) function.
# So, to keep things simple, we support Dask only when
# inplace=False
# Similarly, when append=False, the return value from _predict will be
# different for different cases (for example, when return_probs is True
# or False). If we have to use Dask then we have to careful in
# recording the return values for each chunk.
# So, to keep things simple, we support Dask only when
# append=True
result = self._predict(table=table, exclude_attrs=exclude_attrs,
target_attr=target_attr, append=append,
return_probs=return_probs, probs_attr=probs_attr,
inplace=inplace, copy_props=True)
else:
predicted_results = []
splitted_tables = pd.np.array_split(table, n_chunks)
for i in range(len(splitted_tables)):
partial_result = delayed(self._predict)(table=splitted_tables[i],
exclude_attrs=exclude_attrs, target_attr=target_attr,
append=append,
return_probs=return_probs,
probs_attr=probs_attr,
inplace=inplace,
copy_props=False)
predicted_results.append(partial_result)
predicted_results = delayed(wrap)(predicted_results)
if show_progress:
with ProgressBar():
predicted_results = predicted_results.compute(
scheduler="processes", num_workers=get_num_cores())
else:
predicted_results = predicted_results.compute(
scheduler="processes", num_workers=get_num_cores())
result = pd.concat(predicted_results)
cm.copy_properties(table, result)
return result
# predict method
def _predict(self, x=None, table=None, exclude_attrs=None, target_attr=None,
append=False, return_probs=False,
probs_attr=None, inplace=True, copy_props=True):
"""
Delegated function from predict.
"""
# If x is not none, call the predict method that mimics sk-learn
# predict method.
if x is not None:
y = self._predict_sklearn(x, return_prob=return_probs)
# If the input table and the exclude attributes are not None,
# then call the appropriate predict method.
elif table is not None and exclude_attrs is not None:
y = self._predict_ex_attrs(table, exclude_attrs, return_prob=return_probs)
# If the append is True, update the table
if target_attr is not None and append is True:
# If inplace is True, then update the input table.
if inplace:
if return_probs:
table[target_attr] = y[0]
table[probs_attr] = y[1]
# Return the updated table
return table
else:
# Return the updated table
table[target_attr] = y
return table
else:
# else, create a copy and update it.
table_copy = table.copy()
if return_probs:
table_copy[target_attr] = y[0]
table_copy[probs_attr] = y[1]
else:
table_copy[target_attr] = y
# copy the properties from the input table to the output
# table.
if copy_props:
cm.copy_properties(table, table_copy)
# Return the new table.
return table_copy
else:
# else, raise a syntax error
raise SyntaxError(
'The arguments supplied does not match '
'the signatures supported !!!')
# Return the predictions
return y
# get and set name of matcher
def get_name(self):
# Return the name of the matcher
return self.name
def set_name(self, name):
# Set the name of the matcher
self.name = name
# helper functions
def _get_data_for_sklearn(self, x, y=None, check_rem=True):
"""
Gets data in a format that can be used to call sk-learn methods such
as fit and predict.
"""
# Validate input parameters.
# # We expect the input object (x) to be of type pandas DataFrame.
if not isinstance(x, pd.DataFrame):
logger.error('Input table is not of type DataFrame')
raise AssertionError('Input table is not of type DataFrame')
# Check to see if we have to remove id column
if x.columns[0] == '_id' and check_rem == True:
logger.warning(
'Input table contains "_id". '
'Removing this column for processing')
# Get the values from the DataFrame
x = x.values
# Remove the first column ('_id')
x = pd.np.delete(x, 0, 1)
else:
# Get the values from the DataFrame
x = x.values
if y is not None:
# Remove the _id column from the input.
if not isinstance(y, pd.Series) and y.columns[0] == '_id' \
and check_rem == True:
logger.warning(
'Input table contains "_id". '
'Removing this column for processing')
# Get the values from the DataFrame
y = y.values
y = | pd.np.delete(y, 0, 1) | pandas.np.delete |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 15:13:19 2019
@author: kennedy
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = '1.0'
seed = 1333
from numpy.random import seed
seed(19)
from tensorflow import set_random_seed
set_random_seed(19)
import os
from STOCK import stock, loc
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import lightgbm as lgb
from datetime import datetime
import matplotlib.pyplot as plt
from Preprocess import process_time
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from sklearn.ensemble import (AdaBoostRegressor, #Adaboost regressor
RandomForestRegressor, #Random forest regressor
GradientBoostingRegressor, #Gradient boosting
BaggingRegressor, #Bagging regressor
ExtraTreesRegressor) #Extratrees regressor
#get ojects in the dataset folder and
#strip extension
def ls_STOK():
'''
:Return:
List of stock in dataset
'''
DIR_OBJ = os.listdir()
STOK_list_ = []
for x in range(len(DIR_OBJ)):
STOK_list_.append(DIR_OBJ[x].strip('.csv'))
return STOK_list_
#%% SIGNAL GENERATOR --> MACD, BOLLINGER BAND, RSI
##RSI signal
def RSI_signal(STK_data, period, lw_bound, up_bound):
'''
:Arguments:
df:
:Return type:
signal
'''
stock_data = stock(STK_data)
OHLC = stock_data.OHLC()
df = stock_data.CutlerRSI(OHLC, period)
try:
assert isinstance(df, pd.Series) or isinstance(df, pd.DataFrame)
#convert to dataframe
if isinstance(df, pd.Series):
df = df.to_frame()
else:
pass
#get signal
#1--> indicates buy position
#0 --> indicates sell posotion
df['signal'] = np.zeros(df.shape[0])
pos = 0
for ij in df.loc[:, ['RSI_Cutler_'+str(period)]].values:
print(df.loc[:, ['RSI_Cutler_'+str(period)]].values[pos])
if df.loc[:, ['RSI_Cutler_'+str(period)]].values[pos] >= up_bound:
df['signal'][pos:] = 1 #uptrend
elif df.loc[:, ['RSI_Cutler_'+str(period)]].values[pos] <= lw_bound:
df['signal'][pos:] = 0 #downtrend
pos +=1
except:
pass
finally:
print('*'*40)
print('RSI Signal Generation completed')
print('*'*40)
return df
def macd_crossOver(STK_data, fast, slow, signal):
'''
:Argument:
MACD dataframe
:Return type:
MACD with Crossover signal
'''
stock_data = stock(STK_data)
df = stock_data.MACD(fast, slow, signal)
try:
assert isinstance(df, pd.DataFrame) or isinstance(df, pd.Series)
#dataframe
if isinstance(df, pd.Series):
df = df.to_frame()
else:
pass
#1--> indicates buy position
#0 --> indicates sell posotion
df['result'] = np.nan
df['signal'] = np.where(df.MACD > df.MACD_SIGNAL, 1, 0)
df['result'] = np.where((df['signal'] == 1) & (df['MACD_HIST'] >= 0), 1, 0)
except IOError as e:
raise('Dataframe required {}' .format(e))
finally:
print('*'*40)
print('MACD signal generated')
print('*'*40)
return df
def SuperTrend_signal(STK_data, multiplier, period):
'''
:Argument:
MACD dataframe
:Return type:
MACD with Crossover signal
'''
stock_data = stock(STK_data)
df = stock_data.SuperTrend(STK_data, multiplier, period)
try:
assert isinstance(df, pd.DataFrame) or isinstance(df, pd.Series)
#dataframe
if isinstance(df, pd.Series):
df = df.to_frame()
else:
pass
#1--> indicates buy position
#0 --> indicates sell posotion
df = df.fillna(0)
df['signal'] = np.nan
df['signal'] = np.where(stock_data.Close >= df.SuperTrend, 1, 0)
except IOError as e:
raise('Dataframe required {}' .format(e))
finally:
print('*'*40)
print('SuperTrend Signal generated')
print('*'*40)
return df
def bollinger_band_signal(STK_data, period, deviation, strategy = ''):
'''
:Argument:
df:
:Return type:
:bollinger band signal
'''
stock_data = stock(STK_data)
Close = stock_data.Close
df = stock_data.Bolinger_Band(period, deviation)
df = df.fillna(value = 0)
assert isinstance(df, pd.DataFrame) or isinstance(df, pd.Series)
#dataframe
if isinstance(df, pd.Series):
df = df.to_frame()
#get signal
#1--> indicates buy position
#0 --> indicates sell posotion
df['signal'] = np.zeros(df.shape[0])
pos = 0
try:
if strategy == '' or strategy == '0' or strategy == '2':
for ii in Close:
print(Close[pos])
if Close[pos] >= df.Upper_band.values[pos]:
df['signal'][pos:] = 0
elif Close[pos] <= df.Lower_band.values[pos]:
df['signal'][pos:] = 1
pos += 1
elif strategy == '1' or strategy == '3':
for ii in Close:
print(Close[pos])
if Close[pos] >= df.Upper_band.values[pos]:
df['signal'][pos:] = 1
elif Close[pos] <= df.Lower_band.values[pos]:
df['signal'][pos:] = 0
pos += 1
else:
raise('You have entered an incorrect strategy value')
except:
pass
finally:
print('*'*40)
print('Bollinger Signal Generation completed')
print('*'*40)
return df
def trading_signal(RSI, MACD, Bollinger_Band, SuperTrend = None, strategy = ''):
'''
:Arguments:
:MACD:
dataframe containing MACD signal
:Bollinger_Band:
dataframe containing Bollinger band signal
:RSI:
dataframe containing RSI signal
:Return Type:
Buy Sell or Hold signal
'''
MACD_signal = MACD.signal.values
RSI_signal = RSI.signal.values
BB_signal = Bollinger_Band.signal.values
if strategy == '' or strategy == '0' or strategy == '1':
df_prediction = pd.DataFrame({'MACD_signal': MACD_signal,
'RSI_signal': RSI_signal,
'BB_signal': BB_signal})
else:
SuperTrend_Signal = SuperTrend.signal.values
df_prediction = pd.DataFrame({'MACD_signal': MACD_signal,
'RSI_signal': RSI_signal,
'BB_signal': BB_signal,
'SuperTrend_signal': SuperTrend_Signal})
df_prediction['POSITION'] = ''
try:
if strategy == '' or strategy == '0':
print('Calling default strategy')
for ij in range(data.shape[0]):
print(ij)
if MACD_signal[ij] == 1 and\
RSI_signal[ij] == 1 and\
BB_signal[ij] == 1:
df_prediction.POSITION[ij] = 'BUY'
elif MACD_signal[ij] == 0 and\
RSI_signal[ij] == 0 and\
BB_signal[ij] == 0:
df_prediction.POSITION[ij] = 'SELL'
else:
df_prediction.POSITION[ij] = 'HOLD'
elif strategy == '1':
print('Calling strategy %s'%strategy)
for ij in range(data.shape[0]):
print(ij)
if MACD_signal[ij] == 1 and\
RSI_signal[ij] == 1 and\
BB_signal[ij] == 1:
df_prediction.POSITION[ij] = 'BUY'
elif MACD_signal[ij] == 0 and\
RSI_signal[ij] == 0 and\
BB_signal[ij] == 0:
df_prediction.POSITION[ij] = 'SELL'
else:
df_prediction.POSITION[ij] = 'HOLD'
elif strategy == '2':
print('Calling strategy %s'%strategy)
for ij in range(data.shape[0]):
print(ij)
if MACD_signal[ij] == 1 and\
RSI_signal[ij] == 1 and\
BB_signal[ij] == 1 and\
SuperTrend_Signal[ij] == 1:
df_prediction.POSITION[ij] = 'BUY'
elif MACD_signal[ij] == 0 and\
RSI_signal[ij] == 0 and\
BB_signal[ij] == 0 and\
SuperTrend_Signal[ij] == 0:
df_prediction.POSITION[ij] = 'SELL'
else:
df_prediction.POSITION[ij] = 'HOLD'
elif strategy == '3':
print('Calling strategy %s'%strategy)
for ij in range(data.shape[0]):
print(ij)
if MACD_signal[ij] == 1 and\
RSI_signal[ij] == 1 and\
BB_signal[ij] == 1 and\
SuperTrend_Signal[ij] == 1:
df_prediction.POSITION[ij] = 'BUY'
elif MACD_signal[ij] == 0 and\
RSI_signal[ij] == 0 and\
BB_signal[ij] == 0 and\
SuperTrend_Signal[ij] == 0:
df_prediction.POSITION[ij] = 'SELL'
else:
df_prediction.POSITION[ij] = 'HOLD'
except:
pass
finally:
#-----------------------------------------------------------
#reset column and save to throw to csv
if strategy == '' or strategy == '0' or strategy == '1':
enlist = ['BB_signal', 'MACD_signal' , 'RSI_signal','POSITION']
df_prediction = df_prediction.reindex(columns=enlist)
else:
enlist = ['BB_signal', 'MACD_signal' , 'RSI_signal', 'SuperTrend_signal','POSITION']
df_prediction = df_prediction.reindex(columns=enlist)
print('*'*40)
print('Signal generation completed...')
print('*'*40)
return df_prediction
if __name__ == '__main__':
'''
----------------------------------
# Trading strategy
------------------------------------
[X][STRATEGY 0 or ''] --> USES DEFAULT BOLLINGER BAND:: BUY WHEN CLOSE IS BELOW LOWER BOLLINGER
SELL WHEN CLOSE IS ABOVE UPPER BOLLINGER BAND
[X][STRATEGY 1] --> SETS BOLLINGER TO:: BUY WHEN CLOSE IS ABOVE UPPER BOLLINGER BAND
AND SELL WHEN CLOSE IS BELOW LOWER BOLLINGER BAND.
[X][STRATEGY 2] --> USES STRATEGY 0 WITH SUPER TREND INDICATOR
[X][STRATEGY 3] --> USES STRATEGY 1 WITH SUPER TREND INDICATOR
'''
#---------GLOBAL SETTINGS-------------------
path = 'D:\\BITBUCKET_PROJECTS\\Forecasting 1.0\\'
STRATEGY = '3'
DEVIATION = MULTIPLIER = 2
PERIOD = 20
DATA_LIMIT = 400
#--------RSI_SETTINGS------------------------
LOWER_BOUND = 30
UPPER_BOUND = 70
#--------MACD SETTINGS-----------------------
FAST = 12
SLOW = 26
SIGNAL = 9
loc.set_path(path+'DATASET')
#-------get the data we need------------------
STOK_list = ls_STOK()
Signal_Gen = {}
for ii in range(DATA_LIMIT):
print('{}'.format(STOK_list[ii]))
data = loc.read_csv('{}'.format(STOK_list[ii]) + str('.csv'))
data.index = | pd.to_datetime(data.index) | pandas.to_datetime |
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
Represents an period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
freq :
how :
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter(self.ordinal, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day(self.ordinal, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week(self.ordinal, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday(self.ordinal, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow(self.ordinal, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy(self.ordinal, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour(self.ordinal, base, mult)
@property
def minute(self):
base, mult = _gfc(self.freq)
return lib.get_period_minute(self.ordinal, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.get_period_second(self.ordinal, base, mult)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
freqstr = _freq_mod._reverse_period_code_map[base]
if mult == 1:
return "Period('%s', '%s')" % (formatted, freqstr)
return ("Period('%s', '%d%s')" % (formatted, mult, freqstr))
def __str__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
return ("%s" % formatted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatting & docs originally from scikits.timeries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2006'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
if fmt is not None:
return lib.period_strftime(self.ordinal, base, mult, fmt)
else:
return lib.period_ordinal_to_string(self.ordinal, base, mult)
def _period_unbox(key, check=None):
'''
Period-like => int64
'''
if not isinstance(key, Period):
key = Period(key, freq=check)
elif check is not None:
if key.freq != check:
raise ValueError("%s is wrong freq" % key)
return np.int64(key.ordinal)
def _period_unbox_array(arr, check=None):
if arr is None:
return arr
unboxer = np.frompyfunc(lambda x: _period_unbox(x, check=check), 1, 1)
return unboxer(arr)
def _period_box(val, freq):
return Period(val, freq=freq)
def _period_box_array(arr, freq):
if arr is None:
return arr
if not isinstance(arr, np.ndarray):
return arr
boxfunc = lambda x: _period_box(x, freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(arr)
def dt64arr_to_periodarr(data, freq):
if data is None:
return data
if isinstance(freq, basestring):
base, mult = _gfc(freq)
else:
base, mult = freq
return lib.dt64arr_to_periodarr(data.view('i8'), base, mult)
# --- Period index sketch
class PeriodIndex(Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
"""
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = datetools.get_standard_freq(freq)
if data is None:
if start is None and end is None:
raise ValueError('Must specify start, end, or data')
start = to_period(start, freq)
end = to_period(end, freq)
is_start_intv = isinstance(start, Period)
is_end_intv = isinstance(end, Period)
if (start is not None and not is_start_intv):
raise ValueError('Failed to convert %s to period' % start)
if (end is not None and not is_end_intv):
raise ValueError('Failed to convert %s to period' % end)
if is_start_intv and is_end_intv and (start.freq != end.freq):
raise ValueError('Start and end must have same freq')
if freq is None:
if is_start_intv:
freq = start.freq
elif is_end_intv:
freq = end.freq
else:
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
if start is None or end is None:
msg = 'Must specify both start and end if periods is None'
raise ValueError(msg)
data = np.arange(start.ordinal, end.ordinal+1, dtype=np.int64)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, Period):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype='i8')
except:
data = np.array(data, dtype='O')
if freq is None:
raise ValueError('freq cannot be none')
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(data.values, base1, mult1,
base2, mult2, 'E')
else:
if freq is None:
raise ValueError('freq cannot be none')
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.astype('i8')
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
@property
def is_all_dates(self):
return True
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
if isinstance(freq, basestring):
base2, mult2 = _gfc(freq)
else:
base2, mult2 = freq
new_data = lib.period_asfreq_arr(self.values,
base1, mult1,
base2, mult2, how)
return PeriodIndex(new_data, freq=freq)
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year_arr(self.values, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month_arr(self.values, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear_arr(self.values, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter_arr(self.values, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day_arr(self.values, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week_arr(self.values, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday_arr(self.values, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow_arr(self.values, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy_arr(self.values, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour_arr(self.values, base, mult)
@property
def minute(self):
base, mult = _gfc(self.freq)
return lib.get_period_minute_arr(self.values, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.get_period_second_arr(self.values, base, mult)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, func_to_map):
try:
return func_to_map(self)
except:
return super(DatetimeIndex, self).map(func_to_map)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return datetools._period_box_array(self, self.freq)
def to_timestamp(self, freq='D', how='start'):
"""
Cast to datetimeindex of timestamps, at *beginning* of period
Parameters
----------
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = lib.periodarr_to_dt64arr(new_data.values, base, mult)
ts_freq = _period_rule_to_timestamp_rule(self.freq, how=how)
return DatetimeIndex(new_data, freq=ts_freq)
def shift(self, n):
"""
Specialized shift which produces an PeriodIndex
Parameters
----------
n : int
Periods to shift by
freq : freq string
Returns
-------
shifted : PeriodIndex
"""
if n == 0:
return self
return PeriodIndex(data=self.values + n, freq=self.freq)
def __add__(self, other):
if isinstance(other, (int, long)):
return PeriodIndex(self.values + other, self.freq)
return super(PeriodIndex, self).__add__(other)
def __sub__(self, other):
if isinstance(other, (int, long)):
return PeriodIndex(self.values - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return PeriodIndex(self.values - other.ordinal)
return super(PeriodIndex, self).__sub__(other)
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
try:
return super(PeriodIndex, self).get_value(series, key)
except KeyError:
try:
asdt, parsed, reso = datetools.parse_time_string(key)
grp = _freq_mod._infer_period_group(reso)
freqn = | _freq_mod._period_group(self.freq) | pandas.tseries.frequencies._period_group |
import pandas as pd
import os
import sys # ----------------------------- To handle system paths
sys.path.append('.') # ------------------- All paths till the current folder have alias as '.'
class Aircraft():
'''Class defining the object
Aircrafts. It will hold all
the aircraft data and provide
relevant methods to modify &
maintain this data'''
# Constructor for the Airport class
def __init__(self):
self.__aircraft_df = pd.DataFrame()
# Accessor to get the Airport Data
def get_AircraftData(self,path):
"""Return Airport Data"""
if os.path.isfile(path):
self.__aircraft_df = | pd.read_csv(path, encoding='utf-8') | pandas.read_csv |
import time
import pandas as pd
# Read original data
df_train = pd.read_json('./data/train.json')
df_test = | pd.read_json("./data/test.json") | pandas.read_json |
import pandas as pd
from pandas.testing import assert_frame_equal
from unittest import TestCase
from src.executor.utils import calc_target_positions
df_blended_list1 = [
pd.DataFrame([
['ETH', 0.2],
['BTC', 0.1],
], columns=['symbol', 'position']).set_index(['symbol']),
pd.DataFrame([
['ETH', 0.4],
['BTC', 0.2],
], columns=['symbol', 'position']).set_index(['symbol']),
pd.DataFrame([
['XRP', 0.1],
['BTC', 0.3],
], columns=['symbol', 'position']).set_index(['symbol'])
]
class TestExecutorCalcTargetPositions(TestCase):
def test_zero(self):
result = calc_target_positions(
0.0,
df_blended_list=df_blended_list1
)
expected = pd.DataFrame([
['BTC', 0.15],
['ETH', 0.3],
['XRP', 0.0],
], columns=['symbol', 'position']).set_index(['symbol'])
assert_frame_equal(result, expected)
def test_half(self):
result = calc_target_positions(
0.5,
df_blended_list=df_blended_list1
)
expected = pd.DataFrame([
['BTC', 0.2],
['ETH', 0.25],
['XRP', 0.025],
], columns=['symbol', 'position']).set_index(['symbol'])
| assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 22:33:19 2021
@author: zishi
"""
import pandas as pd
def ensemble_t(df_list,mode='soft'):
df = pd.concat(df_list)
df_mean = df.groupby(df.index).mean()
if mode == 'soft':
result = pd.DataFrame(df_mean['proba'],index=df_list[0].index)
else:
result = | pd.DataFrame(df_mean['label'],index=df_list[0].index) | pandas.DataFrame |
from Model.BERT_BILSTM_CRF import BERTBILSTMCRF
from Model.BILSTM_Attetion_CRF import BILSTMAttentionCRF
from Model.BILSTM_CRF import BILSTMCRF
from Model.IDCNN_CRF import IDCNNCRF
from Model.IDCNN5_CRF import IDCNNCRF2
from sklearn.metrics import f1_score, recall_score
import numpy as np
import pandas as pd
from Public.utils import *
from keras.callbacks import EarlyStopping
from DataProcess.process_data import DataProcess
max_len = 100
def train_sample(train_model='BERTBILSTMCRF',
# ['BERTBILSTMCRF', 'BILSTMAttentionCRF', 'BILSTMCRF',
# 'IDCNNCRF', 'IDCNNCRF2']
epochs=15,
log = None,
):
# bert需要不同的数据参数 获取训练和测试数据
if train_model == 'BERTBILSTMCRF':
dp = DataProcess(data_type='msra', max_len=max_len, model='bert')
else:
dp = DataProcess(data_type='msra', max_len=max_len)
train_data, train_label, test_data, test_label = dp.get_data(one_hot=True)
log.info("----------------------------数据信息 START--------------------------")
log.info(f"当前使用数据集 MSRA")
# log.info(f"train_data:{train_data.shape}")
log.info(f"train_label:{train_label.shape}")
# log.info(f"test_data:{test_data.shape}")
log.info(f"test_label:{test_label.shape}")
log.info("----------------------------数据信息 END--------------------------")
if train_model == 'BERTBILSTMCRF':
model_class = BERTBILSTMCRF(dp.vocab_size, dp.tag_size, max_len=max_len)
elif train_model == 'BILSTMAttentionCRF':
model_class = BILSTMAttentionCRF(dp.vocab_size, dp.tag_size)
elif train_model == 'BILSTMCRF':
model_class = BILSTMCRF(dp.vocab_size, dp.tag_size)
elif train_model == 'IDCNNCRF':
model_class = IDCNNCRF(dp.vocab_size, dp.tag_size, max_len=max_len)
else:
model_class = IDCNNCRF2(dp.vocab_size, dp.tag_size, max_len=max_len)
model = model_class.creat_model()
callback = TrainHistory(log=log, model_name=train_model) # 自定义回调 记录训练数据
early_stopping = EarlyStopping(monitor='val_crf_viterbi_accuracy', patience=2, mode='max') # 提前结束
model.fit(train_data, train_label, batch_size=32, epochs=epochs,
validation_data=[test_data, test_label],
callbacks=[callback, early_stopping])
# 计算 f1 和 recall值
pre = model.predict(test_data)
pre = np.array(pre)
test_label = np.array(test_label)
pre = np.argmax(pre, axis=2)
test_label = np.argmax(test_label, axis=2)
pre = pre.reshape(pre.shape[0] * pre.shape[1], )
test_label = test_label.reshape(test_label.shape[0] * test_label.shape[1], )
f1score = f1_score(pre, test_label, average='macro')
recall = recall_score(pre, test_label, average='macro')
log.info("================================================")
log.info(f"--------------:f1: {f1score} --------------")
log.info(f"--------------:recall: {recall} --------------")
log.info("================================================")
# 把 f1 和 recall 添加到最后一个记录数据里面
info_list = callback.info
if info_list and len(info_list)>0:
last_info = info_list[-1]
last_info['f1'] = f1score
last_info['recall'] = recall
return info_list
if __name__ == '__main__':
# 需要测试的模型
train_modes = ['IDCNNCRF', 'IDCNNCRF2', 'BILSTMAttentionCRF', 'BILSTMCRF', 'BERTBILSTMCRF']
# 定义文件路径(以便记录数据)
log_path = os.path.join(path_log_dir, 'train_log.log')
df_path = os.path.join(path_log_dir, 'df.csv')
log = create_log(log_path)
# 训练同时记录数据写入的df文件中
columns = ['model_name','epoch', 'loss', 'acc', 'val_loss', 'val_acc', 'f1', 'recall']
df = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import altair as alt
import pandas as pd
import numpy as np
from datetime import date, timedelta
from os import path
from io import StringIO
from flask import current_app as app
import redis, requests, time, pyarrow
def connect():
return redis.Redis( host=app.config['REDIS_HOST'], port=app.config['REDIS_PORT'] )
#
# A simple label placement algorithm, to avoid overlaps
#
# A conventional approach might use scipy.optimize, but I want
# to avoid dependence on something so big as scipy just for this one
# purpose.
#
class label_placement:
def __init__(self, xcol, ycol):
xy = np.column_stack( [xcol.to_numpy(), ycol.to_numpy()] )
dxy = (xy.max(axis=0) - xy.min(axis=0))/40
r0 = xy/dxy
#
# Default starts above mark
#
phi = np.array([np.pi/2]*len(r0))
#
# Push away from closest neighbor
#
i0, i1 = np.triu_indices(r0.shape[0], k=1)
r = r0 + [0,1] # = [cos(phi),sin(phi)]
for attempt in range(0,100):
#
# Find worst collision
#
id2 = np.argmin(((r[i0,:]-r[i1,:])**2).sum(axis=1))
t0 = i0[id2]
t1 = i1[id2]
#
# We're finished if the collision isn't too bad
#
dr = r[t0,:] - r[t1,:]
if (dr**2).sum() > 2: break
#
# Move each by delta in phi
#
if np.cos(phi[t0]) * dr[1] - np.sin(phi[t0]) * dr[0] > 0:
phi[t0] += np.pi/6
else:
phi[t0] -= np.pi/6
r[t0,:] = r0[t0,:] + [np.cos(phi[t0]), np.sin(phi[t0])]
if np.cos(phi[t1]) * dr[1] - np.sin(phi[t1]) * dr[0] < 0:
phi[t1] += np.pi/6
else:
phi[t1] -= np.pi/6
r[t1,:] = r0[t1,:] + [np.cos(phi[t1]), np.sin(phi[t1])]
self.xy = r * dxy
def X(self):
return self.xy[:,0]
def Y(self):
return self.xy[:,1]
def fetchState(rconn,key):
context = pyarrow.default_serialization_context()
#
# Check date of main dataframe
#
expires = rconn.hget("state"+key,"expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("state"+key,"dataframe"))
#
# Fetch
# Make sure we include a user agent. We are limited to 50,000 records per query,
# but that should be plenty for this table (which has rows per day)
#
req = requests.get(
"https://data.cdc.gov/resource/9mfq-cb36.csv",
params={
'state': key,
'$limit': 5000,
'$select': "submission_date,state,new_case,new_death",
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["submission_date"]).rename(columns={
'submission_date': 'dt'
})
answer = answer.sort_values('dt')
#
# Save
#
rconn.hset("state"+key,"dataframe",context.serialize(answer).to_buffer().to_pybytes())
rconn.hset("state"+key,"expires",str(time.time()+600.0))
return answer
def fetchRecent(rconn):
context = pyarrow.default_serialization_context()
#
# Check date of main dataframe
#
expires = rconn.hget("staterecent","expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("staterecent","dataframe"))
#
# Fetch
# Make sure we include a user agent. We are limited to 50,000 records per query,
# but that should be plenty for this table (which has rows per day)
#
# Fetch starting from 10 days ago, to ensure we get at least seven
#
start = date.today() - timedelta(days=11)
req = requests.get(
"https://data.cdc.gov/resource/9mfq-cb36.csv",
params={
'$where': "submission_date > '{:4d}-{:02d}-{:02d}'".format(start.year,start.month,start.day),
'$limit': 5000,
'$select': "submission_date,state,new_case,new_death",
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["submission_date"]).rename(columns={
'submission_date': 'dt'
})
#
# We actually get some odd "states". Let's remove them.
#
namefile = path.join(app.config['DATA_DIR'],"state-abbre.csv")
valid = pd.read_csv(namefile)
answer = answer[answer.state.isin(valid.Code)]
#
# Sort
#
answer = answer.sort_values('dt')
#
# Save
#
rconn.hset("staterecent","dataframe",context.serialize(answer).to_buffer().to_pybytes())
rconn.hset("staterecent","expires",str(time.time()+600.0))
return answer
def fetchHospital(rconn,key):
context = pyarrow.default_serialization_context()
#
# See: https://dev.socrata.com/foundry/healthdata.gov/g62h-syeh
#
#
# Check date of main dataframe
#
expires = rconn.hget("statehos"+key,"expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("statehos"+key,"dataframe"))
#
# Fetch
# Make sure we include a user agent. We are limited to 50,000 records per query,
# but that should be plenty for this table (which has rows per day)
#
# The HHS sure loves long column names...
#
columns = [
'date',
'state',
'inpatient_beds',
'inpatient_beds_used',
'inpatient_beds_used_covid',
'staffed_icu_adult_patients_confirmed_and_suspected_covid',
'total_staffed_adult_icu_beds',
]
req = requests.get(
"https://healthdata.gov/resource/g62h-syeh.csv",
params={
'state': key,
'$limit': 5000,
'$select': ",".join(columns),
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["date"]).rename(columns={
'date': 'dt'
})
answer = answer.sort_values('dt')
#
# Save
#
rconn.hset("statehos"+key,"dataframe",context.serialize(answer).to_buffer().to_pybytes())
rconn.hset("statehos"+key,"expires",str(time.time()+600.0))
return answer
def fetchVaccine(rconn,key):
context = pyarrow.default_serialization_context()
#
# See: https://dev.socrata.com/foundry/data.cdc.gov/unsk-b7fc
#
#
# Check date of main dataframe
#
expires = rconn.hget("statevac"+key,"expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("statevac"+key,"dataframe"))
#
# Fetch
# Make sure we include a user agent. We are limited to 50,000 records per query,
# but that should be plenty for this table (which has rows per day)
#
columns = [
'Date',
'administered_dose1_recip',
'series_complete_yes'
]
req = requests.get(
" https://data.cdc.gov/resource/unsk-b7fc.csv",
params={
'Location': key,
'$limit': 5000,
'$select': ",".join(columns),
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["Date"]).rename(columns={
'Date': 'date',
'administered_dose1_recip': 'onedose',
'series_complete_yes': 'complete'
}).sort_values('date')
#
# Get corresponding FIPS code
#
fipfile = path.join(app.config['DATA_DIR'],"fips-code.csv")
fips = pd.read_csv(fipfile,sep="\t")
fip_code = fips[fips['key']==key].code.values[0]
#
# Use this to fetch population by age
#
popfile = path.join(app.config['DATA_DIR'],"sc-est2019-agesex-civ.csv")
pop = pd.read_csv(popfile).filter(items=[
'STATE', 'SEX', 'AGE', 'POPEST2019_CIV'
])
pop = pop[(pop.STATE == fip_code) & (pop.SEX == 0)]
#
# Convert to total population
#
pop12 = pop[(pop.AGE >= 12) & (pop.AGE < 900)]['POPEST2019_CIV'].sum()
pop16 = pop[(pop.AGE >= 16) & (pop.AGE < 900)]['POPEST2019_CIV'].sum()
#
# Merge
#
answer['eligible'] = np.where(answer['date'] > pd.to_datetime(date(2021,5,10)), pop12, pop16)
#
# Save
#
rconn.hset("statevac"+key,"dataframe",context.serialize(answer).to_buffer().to_pybytes())
rconn.hset("statevac"+key,"expires",str(time.time()+600.0))
return answer
def fetchRecentVaccine(rconn):
context = pyarrow.default_serialization_context()
#
# See: https://dev.socrata.com/foundry/data.cdc.gov/unsk-b7fc
#
#
# Check date of main dataframe
#
expires = rconn.hget("staterecvac","expires")
if expires and time.time() < float(expires):
return context.deserialize(rconn.hget("staterecvac","dataframe"))
#
# Fetch, sorted by date, to get most recent results, and fetch enough
# to cover all the states
#
columns = [
'Date',
'Location',
'administered_dose1_recip',
'series_complete_yes'
]
req = requests.get(
"https://data.cdc.gov/resource/unsk-b7fc.csv",
params={
'$order': "Date DESC",
'$limit': 200,
'$select': ",".join(columns),
"$$app_token": app.config['SOCRATA_TOKEN']
},
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0'}
)
if req.status_code != 200:
raise Exception("Request failure: {}".format(req.status_code))
answer = pd.read_csv(StringIO(req.text), parse_dates=["Date"]).rename(columns={
'Date': 'date',
'Location': 'key',
'administered_dose1_recip': 'onedose',
'series_complete_yes': 'complete'
})
#
# Only keep most recent results
#
answer = answer.sort_values(['key','date'], ascending=False).drop_duplicates(subset=["key"])
#
# Merge with FIPS
#
fipfile = path.join(app.config['DATA_DIR'],"fips-code.csv")
fips = pd.read_csv(fipfile,sep="\t")
answer = answer.merge(fips,on="key")
#
# Get population > 12 years old
#
popfile = path.join(app.config['DATA_DIR'],"sc-est2019-agesex-civ.csv")
pop = pd.read_csv(popfile).filter(items=[
'STATE', 'SEX', 'AGE', 'POPEST2019_CIV'
])
pop = pop[(pop.AGE >= 12) & (pop.AGE < 900) & (pop.SEX == 0)].filter(
items=("STATE","POPEST2019_CIV")
).rename(columns={
'STATE': 'code',
'POPEST2019_CIV': 'eligible'
})
pop = pop.groupby(by="code").sum().reset_index()
#
# Merge into answer
#
answer = answer.merge(pop,on="code")
#
# Save
#
rconn.hset("staterecvac","dataframe",context.serialize(answer).to_buffer().to_pybytes())
rconn.hset("staterecvac","expires",str(time.time()+600.0))
return answer
def fetchPopulation(rconn):
context = pyarrow.default_serialization_context()
if rconn.hexists("state","population"):
return context.deserialize(rconn.hget("state","population"))
#
# Create for first time
#
popfile = path.join(app.config['DATA_DIR'],"pop-est2019.csv")
namefile = path.join(app.config['DATA_DIR'],"state-abbre.csv")
pop = pd.read_csv(popfile).merge(pd.read_csv(namefile).rename(columns={'State':'NAME'}),on="NAME")
pop = pop.filter(items=("NAME","Code","POPESTIMATE2010")).rename(columns={"Code":"state"})
rconn.hset("state","population",context.serialize(pop).to_buffer().to_pybytes())
return pop
def fetchPolitics(rconn):
context = pyarrow.default_serialization_context()
if rconn.hexists("state","politics"):
return context.deserialize(rconn.hget("state","politics"))
politicsfile = path.join(app.config['DATA_DIR'],"state-party-affiliation.csv")
namefile = path.join(app.config['DATA_DIR'],"state-abbre.csv")
pol = pd.read_csv(politicsfile, sep="\t")
pol = pol.merge(pd.read_csv(namefile).rename(columns={'State':'state'}),on="state")
rconn.hset("state","politics",context.serialize(pol).to_buffer().to_pybytes())
return pol
def menu():
r = connect()
pop = fetchPopulation(r)
return {
'abbrev': dict(zip(pop.state,pop.NAME)),
'default': "CA"
}
def plot(code):
r = connect()
dt = fetchState(r,code)
pop = fetchPopulation(r)
dt = dt[dt.dt >= pd.to_datetime(date(2020,3,1))]
#
# This are fake, so we can make a legend
#
dt['src1'] = "Daily"
dt['src2'] = "7 day"
title = pop[pop.state==code].NAME.to_string(index=False).strip()
case_items = ("dt","new_case","croll","src1","src2")
#
# Here we use a scale from 0 to min(daily.max(),rolling.max()*1.5),
# in order to protect from wild daily corrections
#
def case_plot(chart):
fake_scale = alt.Scale(domain=('Daily','7 day'), range=('lightgrey','blue'))
case_points = chart.mark_line(point=True,clip=True).encode(
x = alt.X("dt:T",title="Date"),
y = alt.Y(
"new_case:Q",
title="Cases",
scale = alt.Scale(domain=[0,min(dt.new_case.max(),dt.croll.max()*1.5)])
),
color = alt.Color("src1", scale=fake_scale)
)
case_average = chart.mark_line(clip=True).encode(
x = alt.X('dt:T'),
y = alt.Y('croll:Q'),
color = alt.Color("src2", scale=fake_scale)
)
return (case_points + case_average).properties(width=500, height=200, title=title)
dt['croll'] = dt.new_case.rolling(window=7).mean()
dt['droll'] = dt.new_death.rolling(window=7).mean()
chart = alt.Chart(dt.filter(
items = case_items + ("new_death","droll")
))
top = case_plot(chart)
death_points = chart.mark_line(
point = {"color": "lightgrey"},
color = "lightgrey",
clip = True
).encode(
x = alt.X("dt:T", title="Date"),
y = alt.Y(
"new_death:Q",
title = "Fatalities",
scale = alt.Scale(domain=[0,min(dt.new_death.max(),dt.droll.max()*1.5)])
)
)
death_average = chart.mark_line(clip=True).encode(
x = alt.X('dt:T'),
y = alt.Y('droll:Q')
)
bot = (death_points + death_average).properties(width=500, height=200)
return (top & bot).configure_legend(title=None).to_dict()
def hospitals(code):
r = connect()
dt = fetchState(r,code)
dth = fetchHospital(r,code)
pop = fetchPopulation(r)
dt = dt[dt.dt >= pd.to_datetime(date(2020,3,1))]
dth = dth[dth.dt >= pd.to_datetime(date(2020,3,1))]
#
# This are fake, so we can make a legend
#
dt['src1'] = "Daily"
dt['src2'] = "7 day"
title = pop[pop.state==code].NAME.to_string(index=False).strip()
case_items = ("dt","new_case","croll","src1","src2")
#
# Here we use a scale from 0 to min(daily.max(),rolling.max()*1.5),
# in order to protect from wild daily corrections
#
def case_plot(chart):
fake_scale = alt.Scale(domain=('Daily','7 day'), range=('lightgrey','blue'))
case_points = chart.mark_line(point=True,clip=True).encode(
x = alt.X("dt:T",title="Date"),
y = alt.Y(
"new_case:Q",
title="Cases",
scale = alt.Scale(domain=[0,min(dt.new_case.max(),dt.croll.max()*1.5)])
),
color = alt.Color("src1", scale=fake_scale)
)
case_average = chart.mark_line(clip=True).encode(
x = alt.X('dt:T'),
y = alt.Y('croll:Q'),
color = alt.Color("src2", scale=fake_scale)
)
return (case_points + case_average).properties(width=500, height=200, title=title)
dt['croll'] = dt.new_case.rolling(window=7).mean()
chart = alt.Chart(dt.filter(items = case_items))
top = case_plot(chart)
#
# We'll fold the data for the hospital plot
#
dt_1 = dth.filter(items=("dt","staffed_icu_adult_patients_confirmed_and_suspected_covid")).rename(
columns={"staffed_icu_adult_patients_confirmed_and_suspected_covid":"icu"}
)
dt_2 = dth.filter(items=("dt","total_staffed_adult_icu_beds")).rename(
columns={"total_staffed_adult_icu_beds":"icu"}
)
dt_1['label'] = 'COVID-19'
dt_2['label'] = 'Total'
chart2 = alt.Chart(pd.concat([dt_1,dt_2])).mark_line().encode(
x = alt.X("dt:T", title="Date"),
y = alt.Y(
"icu:Q",
title = "Staffed ICU beds"
),
color = alt.Color("label",scale=alt.Scale(
domain=('Total','COVID-19'),
range=('darkblue','darkorange')
))
)
bot = chart2.properties(width=500, height=200)
return (top & bot).resolve_scale(color='independent').configure_legend(title=None).to_dict()
def vaccines(code):
r = connect()
dt = fetchVaccine(r,code)
dt['onedose_pop'] = dt['onedose'] / dt['eligible']
dt['complete_pop'] = dt['complete'] / dt['eligible']
#
# Serialize, and also eliminate data rows with zeros, which appear to be
# placeholders for "no data".
#
dt1 = dt[dt['onedose'] > 0].filter(items=['date','onedose_pop']).copy().rename(columns={
'onedose_pop': 'frac'
})
dt1['status'] = 'One dose'
dt2 = dt[dt['complete'] > 0].filter(items=['date','complete_pop']).copy().rename(columns={
'complete_pop': 'frac'
})
dt2['status'] = 'Fully vaccinated'
#
# Title
#
pop = fetchPopulation(r)
title = pop[pop.state==code].NAME.to_string(index=False).strip()
#
# Build chart
#
chart = alt.Chart(pd.concat([dt1,dt2])).mark_line().encode(
x = alt.X('date:T', title="Date"),
y = alt.Y('frac:Q', title="Percent eligible", axis=alt.Axis(format='%')),
color = alt.Color('status:N')
)
return chart.properties(width=500, height=300, title=title).configure_legend(title=None).to_dict()
def vaccines_bar():
r = connect()
dt = fetchRecentVaccine(r)
dt['onedose'] = dt['onedose'] / dt['eligible']
dt['complete'] = dt['complete'] / dt['eligible']
reduced = dt.filter(items=[
'key', 'onedose', 'complete'
]).sort_values(by="key")
datestamp = pd.to_datetime(dt['date'].values[0]).strftime('%D')
chart = alt.Chart(reduced)
top = chart.mark_bar().encode(
x = alt.X("key:N",title="State"),
y = alt.Y("onedose:Q", title="Percent eligible", axis=alt.Axis(format='%'), scale=alt.Scale(domain=[0,1]))
).properties(
width = 600,
height = 200,
title = "At least one dose {}".format(datestamp)
)
bottom = chart.mark_bar().encode(
x = alt.X("key:N",title="State"),
y = alt.Y("complete:Q", title="Percent eligible", axis=alt.Axis(format='%'), scale=alt.Scale(domain=[0,1]))
).properties(
width = 600,
height = 200,
title = "Fully vaccinated {}".format(datestamp)
)
return (top & bottom).to_dict()
def vaccines_by_party():
r = connect()
dt = fetchRecentVaccine(r)
pol = fetchPolitics(r).filter(items=("Code","democrat")).rename(columns={"Code":"key"})
dt = dt.merge(pol,on="key")
dt['onedose'] = dt['onedose'] / dt['eligible']
dt['complete'] = dt['complete'] / dt['eligible']
dt['democrat'] = dt['democrat'] / 100.0
reduced = dt.filter(items=[
'key', 'onedose', 'complete', "democrat"
]).sort_values(by="key")
datestamp = | pd.to_datetime(dt['date'].values[0]) | pandas.to_datetime |
############################################################################################
# FileName [ comut_plot_analysis.py ]
# PackageName [ lib/analysis ]
# Synopsis [ Implement CoMut analysis. ]
# Author [ <NAME> ]
# Copyright [ 2021 9 ]
############################################################################################
from ..maf_filter import fast_read_maf
from termcolor import colored
import pandas as pd
import os
#########################################################
# #
# python3 mafAnalysis.py \ #
# -f examples/test_data/maf/TCGA_test.maf \ #
# -cm 60456963 \ #
# -o examples/output \ #
# -p examples/pic/ #
# #
#########################################################
class CoMutAnalysis:
'''CoMut plot analysis
Arguments:
maf_file {string} -- The input MAF file for all data.
output_folder {string} -- The path for output files.
length {int} -- The length of genome (WES = 60456963)
Parameters:
self.head {string} -- The column names of MAF file.
self.df {pd.DataFrame} -- The data for the MAF file.
Outputs:
mutation_data.tsv
mutation_classification.tsv
'''
def __init__(self, maf_file):
print(colored(('\nStart CoMut_Plot_Analysis....'), 'yellow'))
self.head, self.df = fast_read_maf(maf_file)
def data_analysis(self, output_folder, length):
def mutation_type():
maf = self.df
chosen_col = maf[['Tumor_Sample_Barcode','Hugo_Symbol','Variant_Classification']]
chosen_col = chosen_col.rename({'Tumor_Sample_Barcode':'sample', 'Hugo_Symbol':'category', 'Variant_Classification':'value'}, axis=1)
value_old_list = ['Missense_Mutation', 'Nonsense_Mutation','In_Frame_Del', 'In_Frame_Ins','Splice_Site',
'Silent','Frame_Shift_Del','Frame_Shift_Ins','Nonstop_Mutation','Translation_Start_Site']
remove_idx = []
for i in range(len(chosen_col['value'])):
if chosen_col['value'][i] not in value_old_list:
remove_idx.append(i)
else:
if chosen_col['value'][i] == 'Missense_Mutation':
chosen_col['value'][i] = 'Missense'
elif chosen_col['value'][i] == 'Nonsense_Mutation':
chosen_col['value'][i] = 'Nonsense'
elif chosen_col['value'][i] == 'Nonstop_Mutation':
chosen_col['value'][i] = 'Nonstop'
elif chosen_col['value'][i] == 'In_Frame_Del' or chosen_col['value'][i] == 'In_Frame_Ins':
chosen_col['value'][i] = 'In frame indel'
elif chosen_col['value'][i] == 'Frame_Shift_Del' or chosen_col['value'][i] == 'Frame_Shift_Ins':
chosen_col['value'][i] = 'Frameshift indel'
elif chosen_col['value'][i] == 'Splice_Site':
chosen_col['value'][i] = 'Splice site'
elif chosen_col['value'][i] == 'Translation_Start_Site':
chosen_col['value'][i] = 'Translation start site'
chosen_col = chosen_col.drop(chosen_col.index[remove_idx])
# print(chosen_col)
unique_chosen_col = chosen_col.drop_duplicates()
# print(unique_chosen_col)
# os._exit()
unique_chosen_col.to_csv(output_folder+'mutation_data.tsv', sep = '\t', index = False)
print(colored(('=> Generate CoMut_Analysis output files:'), 'green'))
print(colored((' '+output_folder+'mutation_data.tsv'), 'green'))
def mutation_clonality():
mutation_type_file = pd.read_csv(output_folder+'mutation_data.tsv', sep='\t', header=0)
sample_dict = dict()
for idx, data in mutation_type_file.iterrows():
if data['sample'] not in sample_dict:
sample_dict[data['sample']] = [0, 0]
if data['value'] == 'Silent':
sample_dict[data['sample']][1]+=1
else:
sample_dict[data['sample']][0]+=1
for s in sample_dict:
sample_dict[s][0] = sample_dict[s][0]*1000000/length
sample_dict[s][1] = sample_dict[s][1]*1000000/length
mutation_clone = pd.DataFrame.from_dict(sample_dict, orient='index')
mutation_clone.reset_index(level=0, inplace=True)
mutation_clone.to_csv(output_folder+'mutation_classification.tsv', sep='\t', header=['sample', 'Nonsynonymous', 'Synonymous'], index=False)
print(colored((' '+output_folder+'mutation_classification.tsv'+'\n'), 'green'))
mutation_type()
mutation_clonality()
from comut import comut
from comut import fileparsers
#############################################################################
# #
# python3 mafAnalysis.py \ #
# -cmp examples/tsv/comut.tsv examples/tsv/comut_info.tsv 0 comut.pdf \ #
# -o examples/output \ #
# -p examples/pic/ #
# #
#############################################################################
class CoMutPlot:
''' CoMut plot plotting
Arguments:
tsv_file {string} -- A TSV file includes all file paths that need for plotting.
tsv_info {string} -- A TSV file includes all informations for plotting.
pic {string} -- The path for storing comut plot.
theme {string} -- The color theme. (0: cold, 1: warm)
comut_name {string} -- The file name of CoMut plot.
Parameters:
self.fd {dict} -- The file dictionary for each subplot.
self.info {dict} -- The information dictionary for each subplot.
Pictures:
An image of CoMut plot.
'''
def __init__(self, tsv_file, tsv_info):
print(colored(('\nStart plotting CoMut Plot....'), 'yellow'))
self.fd = (pd.read_csv(tsv_file, sep='\t')).to_dict('list')
for item in self.fd:
cleanedList = [x for x in self.fd[item] if str(x) != 'nan']
self.fd[item] = cleanedList
self.info = ( | pd.read_csv(tsv_info, sep='\t') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 16:28:33 2021
@author: yeabinmoon
"""
import pandas as pd
from safegraph_py_functions import safegraph_py_functions as sgpy
import time
df = | pd.read_pickle('/Users/yeabinmoon/Documents/JMP/data/SafeGraph/POI/temp/temp2.pickle.gz') | pandas.read_pickle |
#!/usr/bin/env python3
"""
Normalize UCI computer hardware dataset (http://archive.ics.uci.edu/ml/datasets/Computer+Hardware)
1. vendor name: 30
(adviser, amdahl,apollo, basf, bti, burroughs, c.r.d, cambex, cdc, dec,
dg, formation, four-phase, gould, honeywell, hp, ibm, ipl, magnuson,
microdata, nas, ncr, nixdorf, perkin-elmer, prime, siemens, sperry,
sratus, wang)
2. Model Name: many unique symbols
3. MYCT: machine cycle time in nanoseconds (integer)
4. MMIN: minimum main memory in kilobytes (integer)
5. MMAX: maximum main memory in kilobytes (integer)
6. CACH: cache memory in kilobytes (integer)
7. CHMIN: minimum channels in units (integer)
8. CHMAX: maximum channels in units (integer)
9. PRP: published relative performance (integer)
10. ERP: estimated relative performance from the original article (integer)
"""
import argparse
import pandas as pd
from logzero import logger
def main(args):
""" Main entry point of the app """
data_path = args.data_path
output_path = args.output_path
machine_data_df = | pd.read_csv(data_path, header='infer') | pandas.read_csv |
import pandas as pd
import numpy as np
from keras.models import load_model
from sklearn.metrics import roc_curve, roc_auc_score, auc, precision_recall_curve, average_precision_score
import os
import pickle
from scipy.special import softmax
from prg import prg
class MetricsGenerator(object):
def __init__(self, dataset_dir, model_dir, metrics_dir):
self._model_dir = model_dir
self._metrics_dir = metrics_dir
self._train_x = pd.read_csv(dataset_dir + "train_x.csv")
self._test_x = pd.read_csv(dataset_dir + "test_x.csv")
self._train_x = self._train_x.drop(self._train_x.columns[0], axis=1)
self._test_x = self._test_x.drop(self._test_x.columns[0], axis=1)
self._train_y = pd.read_csv(dataset_dir + "train_y.csv")
self._test_y = pd.read_csv(dataset_dir + "test_y.csv")
def generate_metrics_for_model(self, model):
error_df = self.get_error_df(model)
roc_df, roc_auc_df = self.get_roc_and_auc_df(error_df)
precision_recall_df, precision_recall_auc_df, average_precision_score_df = self.get_precision_recall_and_auc_df(error_df)
prg_df, prg_auc_df = self.get_prg_and_auc_df(error_df)
history_df = self.get_history_df(model)
self.create(self._metrics_dir + "model" + str(model))
self.store_df("error_df", model,error_df)
self.store_df("roc_df", model, roc_df)
self.store_df("roc_auc_df", model, roc_auc_df)
self.store_df("precision_recall_df", model, precision_recall_df)
self.store_df("precision_recall_auc_df", model, precision_recall_auc_df)
self.store_df("average_precision_score_df", model, average_precision_score_df)
self.store_df("prg_df", model, prg_df)
self.store_df("prg_auc_df", model, prg_auc_df)
self.store_df("history_df", model, history_df)
def get_error_df(self, model):
model = load_model(self._model_dir + "model" + str(model) + ".h5")
test_x_predicted = model.predict(self._test_x)
mse = np.mean(np.power(self._test_x - test_x_predicted, 2), axis = 1)
error_df = pd.DataFrame({'Reconstruction_error':mse, 'True_values': self._test_y['target']})
return error_df
def get_roc_and_auc_df(self, error_df):
false_pos_rate, true_pos_rate, thresholds = roc_curve(error_df.True_values, error_df.Reconstruction_error)
i = np.arange(len(true_pos_rate))
roc_df = pd.DataFrame({'FPR': pd.Series(false_pos_rate, index=i), 'TPR': pd.Series(true_pos_rate, index=i), 'Threshold': pd.Series(thresholds, index=i)})
roc_auc = roc_auc_score(error_df.True_values, error_df.Reconstruction_error)
i = np.arange(1)
roc_auc_df = pd.DataFrame({'AUC': pd.Series(roc_auc, index=i)})
return roc_df, roc_auc_df
def get_precision_recall_and_auc_df(self, error_df):
precision, recall, thresholds = precision_recall_curve(error_df.True_values, error_df['Reconstruction_error'])
precision = precision[:-1]
recall = recall[:-1]
i = np.arange(len(precision))
precision_recall_df = pd.DataFrame({'Precision': pd.Series(precision, index=i), 'Recall':pd.Series(recall, index=i), 'Threshold':pd.Series(thresholds, index=i)})
i = np.arange(1)
precision_recall_auc = auc(recall, precision)
precision_recall_auc_df = pd.DataFrame({'AUC': pd.Series(precision_recall_auc, index=i)})
average_precision = average_precision_score(error_df.True_values, error_df.Reconstruction_error)
average_precision_score_df = pd.DataFrame({'AP': | pd.Series(average_precision, index=i) | pandas.Series |
import numpy as np
import pandas as pd
from EvaluationFunctions.LoadFrameworkDesignsFilenames import load_framework_designs_filenames
from EvaluationFunctions.LoadCompetitorsFilenames import load_competitors_filenames
from EvaluationFunctions.Load_withIterations_Results import load_with_iterations_results
from Evaluation.Plot_TimeComplexity import plot_time_complexity
# 0. Read in file names of experiment
experiments = ["10Dim", "50Dim", "100Dim", "500Dim", "1000Dim"]
competitors = True
times_per_example = []
df_times_per_example = pd.DataFrame()
for experiment in experiments:
time_per_example_experiment = {}
# 1. Read in File names
if competitors:
path, dataset, result_file_names = load_competitors_filenames(experiment=experiment)
file_names = ["FILE_SAW_NewAE", "FILE_SAW_RetrainAE", "FILE_Baseline_ADWIN10", "FILE_Baseline_ADWIN10-initialized",
"FILE_Competitor_IBDD", "FILE_Competitor_D3"]
result_folders = ["SAW_Autoencoder_ADWIN_Training", "SAW_Autoencoder_ADWIN_Training", "Baseline_MultipleADWINS",
"Baseline_MultipleADWINS", "Competitor_IBDD", "Competitor_D3"]
experiment_names = ["SAW (NAE-IAW)", "SAW (RAE-IAW)", "ADWIN-10", "ADWIN-10i", "IBDD", "D3"]
else:
path, dataset, result_file_names = load_framework_designs_filenames(experiment=experiment)
file_names = ["FILE_TrainNewAE_KeepADWIN", "FILE_TrainNewAE_InitializeADWIN",
"FILE_TrainNewAE_InitializeAndFeedADWIN",
"FILE_RetrainAE_KeepADWIN", "FILE_RetrainAE_InitializeADWIN", "FILE_RetrainAE_InitializeAndFeedADWIN"]
result_folders = ["SAW_Autoencoder_ADWIN_Training"] * 6
experiment_names = ["NAE-KAW", "NAE-IAW", "NAE-RAW", "RAE-KAW", "RAE-IAW", "RAE-RAW"]
# 2. Read in Files and generate evaluation metrics
for experiment_idx in range(len(file_names)):
if result_file_names[file_names[experiment_idx]] != '-':
evaluation_results = load_with_iterations_results(
file_name=result_file_names[file_names[experiment_idx]], result_folder=result_folders[experiment_idx])
time_per_example = np.round(np.mean(evaluation_results['Time per Example']), 4)
time_per_example_experiment[experiment_names[experiment_idx]] = time_per_example
else:
time_per_example_experiment[experiment_names[experiment_idx]] = 0
# Append accuracies of experiment to list of all experiments
times_per_example.append(time_per_example_experiment)
# Create data frame
times_per_example_table = | pd.DataFrame(data=times_per_example, index=experiments) | pandas.DataFrame |
"""
FyleExtractConnector(): Connection between Fyle and Database
"""
import logging
from os import path
from typing import List
import pandas as pd
class FyleExtractConnector:
"""
- Extract Data from Fyle and load to Database
"""
def __init__(self, fyle_sdk_connection, dbconn):
self.__dbconn = dbconn
self.__connection = fyle_sdk_connection
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info('Fyle connection established')
def create_tables(self):
"""
Creates DB tables
:return: None
"""
basepath = path.dirname(__file__)
ddl_path = path.join(basepath, 'extract_ddl.sql')
ddl_sql = open(ddl_path, 'r').read()
self.__dbconn.executescript(ddl_sql)
def extract_settlements(self, updated_at: List['str'] = None, exported: bool = None) -> List[str]:
"""
Extract settlements from Fyle
:param updated_at: Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern.
:param exported: True for exported settlements and False for unexported settlements
:return: List of settlement ids
"""
self.logger.info('Extracting settlements from Fyle.')
settlements = self.__connection.Settlements.get_all(updated_at=updated_at, exported=exported)
df_settlements = pd.DataFrame(settlements)
self.logger.info('%s settlements extracted.', str(len(df_settlements.index)))
if settlements:
df_settlements = df_settlements[[
'id', 'created_at', 'updated_at', 'opening_date', 'closing_date',
'employee_id', 'employee_email', 'employee_code', 'creator_employee_id',
'creator_employee_email', 'creator_employee_code', 'org_id', 'org_name',
'exported'
]]
df_settlements.to_sql('fyle_extract_settlements', self.__dbconn, if_exists='append', index=False)
return df_settlements['id'].to_list()
return []
def extract_employees(self) -> List[str]:
"""
Extract employees from Fyle
:return: List of employee ids
"""
self.logger.info('Extracting employees from Fyle.')
employees = self.__connection.Employees.get_all()
self.logger.info('%s employees extracted.', str(len(employees)))
if employees:
df_employees = pd.DataFrame(employees)
df_employees = df_employees[[
'id', 'created_at', 'updated_at', 'employee_email', 'employee_code',
'full_name', 'joining_date', 'location', 'level_id', 'level',
'business_unit', 'department_id', 'department', 'sub_department',
'approver1_email', 'approver2_email', 'approver3_email', 'title',
'branch_ifsc', 'branch_account', 'mobile', 'delegatee_email',
'default_cost_center_name', 'disabled', 'org_id', 'org_name'
]]
df_employees.to_sql('fyle_extract_employees', self.__dbconn, if_exists='append', index=False)
return df_employees['id'].to_list()
return []
def extract_expenses(self, settlement_ids: List[str] = None, state: List[str] = None,
fund_source: List[str] = None, reimbursable: bool = None, updated_at: List[str] = None,
exported: bool = None) -> List[str]:
"""
Extract expenses from Fyle
:param updated_at: Extract expenses in exported_at date range
:param exported: True for exported expenses and False for unexported expenses
:param settlement_ids: List of settlement_ids
:param state: List of expense states
:param fund_source: List of expense fund_sources
:param reimbursable: True for reimbursable expenses, False for non reimbursable expenses
:return: List of expense ids
"""
self.logger.info('Extracting expenses from Fyle.')
expenses = self.__connection.Expenses.get_all(
settlement_id=settlement_ids,
state=state,
updated_at=updated_at,
fund_source=fund_source,
exported=exported
)
if reimbursable is not None:
expenses = list(filter(lambda expense: expense['reimbursable'], expenses))
self.logger.info('%s expenses extracted.', str(len(expenses)))
if expenses:
df_expenses = | pd.DataFrame(expenses) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 24 14:43:54 2019
@author: Gary
This script is used to download a new raw set, save it if the
day of the week is in the list, and look for new events.
It will also record how many records are in each new event, runs
tripwire and uploads a webpage summary for general access.
This script runs independently of the main build_database set. It is designed
to run autonomously, can be executed from a crontab command.
occasionally the download won't work, probably because of something on the
FF end. Currently this script just exits with an exception and does not
retry.
"""
import core.Construct_set as const_set
import core.trip_wire as twire
import pandas as pd
import requests
import subprocess
import shutil
from datetime import datetime
#import hashlib
force_archive = False # use sparingly, only when not doing routine checks.
do_download = True # if False, will run routines without downloading first.
do_tripwire = True
upload_report = True # replaces last report on the web with the todays
today = datetime.today()
if today.weekday() in [4]: # Monday= 0, Sunday = 6
archive_file=True
else:
archive_file=False
if force_archive:
archive_file=True
# define
sources = './sources/'
archive = './archive/'
datefn= './sources/upload_dates.csv'
skyfn = 'sky_truth_final'
afile = archive+f'ff_archive_{today.strftime("%Y-%m-%d")}.zip'
currfn = 'testData'
lastfn = 'testData_last'
outdir = './out/'
tempfolder = './tmp/'
st = datetime.now() # start timer
# get and save files
if do_download:
url = 'http://fracfocusdata.org/digitaldownload/fracfocuscsv.zip'
print(f'Downloading data from {url}')
r = requests.get(url, allow_redirects=True,timeout=20.0)
#print(f'Download completed in {endit-st}')
if do_tripwire:
twire.backup_testData(infn=currfn+'.zip',
outfn=lastfn+'.zip',
sources=sources)
open(sources+currfn+'.zip', 'wb').write(r.content) # overwrites currfn file.
if archive_file: open(afile, 'wb').write(r.content)
## Now process file
print('Working on data set')
outdf = | pd.read_csv(datefn,quotechar='$') | pandas.read_csv |
import pandas as pd
import matplotlib as mpl
def create_stim_artists(app):
pattern = mpl.patches.Circle((0, 0),
app.p.stim_size / 2,
fc="firebrick", lw=0,
alpha=.5,
animated=True)
return dict(pattern=pattern)
def initialize_trial_figure(app):
fig = mpl.figure.Figure((5, 5), dpi=100, facecolor="white")
locator = mpl.ticker.MaxNLocator(min_n_ticks=1, integer=True)
axes = [fig.add_subplot(4, 1, i) for i in range(1, 5)]
axes[0].set(xticklabels=[],
ylim=(-.1, 1.1),
yticks=[0, 1],
yticklabels=["No", "Yes"],
ylabel="Responded")
axes[1].set(xticklabels=[],
ylim=(-.1, 1.1),
yticks=[0, 1],
yticklabels=["No", "Yes"],
ylabel="Correct")
axes[2].set(xticklabels=[],
ylim=(-1.1, 1.1),
yticks=[-1, -.5, 0, .5, 1],
yticklabels=[-1, "", 0, "", 1],
ylabel="Bet")
axes[2].axhline(+1, lw=1, color=".8", zorder=0)
axes[2].axhline(0, lw=1, color=".7", dashes=(3, 1), zorder=0)
axes[2].axhline(-1, lw=1, color=".8", zorder=0)
axes[3].axhline(+.1, lw=3, color=mpl.cm.coolwarm(.9), alpha=.5, zorder=0)
axes[3].axhline(-.1, lw=3, color=mpl.cm.coolwarm(.1), alpha=.5, zorder=0)
axes[3].set(ylim=(-5, 5),
yticks=[-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
yticklabels=[-5, "", "", "", "", 0, "", "", "", "", 5],
ylabel="LLR")
for ax in axes:
ax.xaxis.set_major_locator(locator)
fig.text(.55, .04, "", size=12, ha="center", va="center")
fig.subplots_adjust(.15, .125, .95, .95, )
return fig, axes
def update_trial_figure(app, trial_data):
# Create a new full dataset
trial_data = | pd.read_json(trial_data, typ="series") | pandas.read_json |
import ipdb
import fnmatch
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path',type = str)
parser.add_argument('--savepath',type = str)
args = parser.parse_args()
path = args.path
savepath = args.savepath
# Preprocess the bedfiles
# path: the file that contain all region_cr (i.e.:'example_cohesin_regions_20200424.bed')
# savepath: path to save processed region_cr files (i.e.:'./Regions_exp/')
File = | pd.read_csv(path,sep = '\t',header=None) | pandas.read_csv |
import pandas as pd
def run(labels, model_class, **kwargs):
"""Run test.
Parameters
----------
labels: torch.LongTensor
Tensor of target (label) data.
model_class
The class of the model on which prediction will be performed.
It must implement the fit, the predict_proba and the reset_parameters method.
kwargs
The arguments used when instantiating the model.
Returns
-------
ranks_df: pd.DataFrame
A DataFrame containing the rank of the non-labeled nodes. The DataFrame is indexed the number of the nodes.
"""
use_cuda = labels.is_cuda
labeled_nodes_mask = labels.byte()
# Create model
model = model_class(**kwargs)
if use_cuda and "cuda" in dir(model):
model.cuda()
# Train model
model.fit(train_labels=labels[labeled_nodes_mask], train_mask=labeled_nodes_mask)
# Get predictions
predictions = model.predict_proba()
if predictions.is_cuda:
predictions = predictions.cpu()
predictions_df = | pd.DataFrame({"prediction": predictions}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
import time
import math
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import black_litterman
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.black_litterman import BlackLittermanModel
from statsmodels.tsa.arima_model import ARIMA
def filter(init, source, asset_arr=[1, 2, 3, 4], geo_arr=[7, 2, 3, 5, 4, 6, 1], score=3):
# Filter according to user's rank
asset_class = ["Equity", "Fixed Income",
"Mixed Allocation", "Money Market"]
geo_class = ["Africa& Middle West Region", "Asian Pacific Region", "European Region", "Greater China",
"International", "Latin American Region", "U.S."]
fund_num = init.shape[0]
filter_re = []
for i in range(0, fund_num):
asset_tmp = init['Asset Class'][i]
geo_tmp = init['Geographical Focus'][i]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and (geo_tmp == geo_class[geo_arr[0] - 1] or geo_tmp == geo_class[geo_arr[1] - 1] or geo_tmp == geo_class[geo_arr[2] - 1] or geo_tmp == geo_class[geo_arr[3] - 1])):
filter_re.append(init['ISIN'][i])
# If number of the funds filted is smaller than 100(can be specified), choose again
fund_filted_min = 100
for i in range(4, 7):
if (len(filter_re) < fund_filted_min):
for j in range(0, fund_num):
asset_tmp = init['Asset Class'][j]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and geo_class[geo_arr[i] - 1] == init['Geographical Focus'][j]):
filter_re.append(init['ISIN'][j])
else:
break
# data: names after filter + their risks
data = pd.DataFrame()
data.insert(loc=0, column='name', value=[])
data.insert(loc=1, column='risk', value=[])
for i in range(0, len(filter_re)):
col_index = source.columns.get_loc(filter_re[i])
price = source.iloc[:, col_index + 1]
price = price.dropna().reset_index(drop=True)
returns = np.diff(price) / price[:-1]
ann_risk = np.std(returns) * math.sqrt(252)
len_data = len(data)
data.loc[len_data, 'name'] = filter_re[i]
data.loc[len_data, 'risk'] = ann_risk
# Sort according to their risks
data_sort = data.sort_values(
axis=0, ascending=True, by='risk').reset_index(drop=True)
'''
print("\n---risk---")
print(data_sort)
print()
'''
# get corresponding funds according to scores
len_index = int(np.floor(len(data_sort['name']) / 5))
fil_name = []
if (score == 5):
for i in range(len_index * 4, len(data_sort['name'])):
fil_name.append(data_sort.loc[i, 'name'])
else:
for i in range(len_index * (score - 1), len_index * score):
fil_name.append(data_sort.loc[i, 'name'])
### result: name + returns
result = pd.DataFrame()
result.insert(loc=0, column='name', value=[])
result.insert(loc=1, column='returns', value=[])
for i in range(0, len(fil_name)):
col_index = source.columns.get_loc(fil_name[i])
price = source.iloc[:, col_index + 1]
price = price.dropna().reset_index(drop=True)
returns = np.diff(price) / price[:-1]
rets_add_one = returns + 1
cum_rets = rets_add_one.cumprod() - 1
len_data = len(result)
result.loc[len_data, 'name'] = fil_name[i]
result.loc[len_data, 'returns'] = cum_rets[len(cum_rets) - 1]
# Sort according to their returns
result_sort = result.sort_values(
axis=0, ascending=False, by='returns').reset_index(drop=True)
'''
print("\n---return---")
print(result_sort)
print()
'''
# name_final: 5 names
name_final = []
for i in range(0, 5):
name_final.append(result_sort.loc[i, 'name'])
# price_five: 5 names + their prices
price_five = pd.DataFrame()
for i in range(0, len(name_final)):
price_five.insert(loc=i * 2, column=i, value=[])
price_five.insert(loc=i * 2 + 1, column=name_final[i], value=[])
for i in range(0, len(name_final)):
col_index = source.columns.get_loc(name_final[i])
date = source.iloc[:, col_index]
price = source.iloc[:, col_index + 1]
price_five.iloc[:, i * 2 + 1] = price
price_five.iloc[:, i * 2] = date
# combine
tmp = pd.DataFrame()
tmp.insert(loc=0, column='date', value=[])
tmp.insert(loc=1, column=name_final[0], value=[])
tmp['date'] = price_five.iloc[:, 0]
tmp[name_final[0]] = price_five.iloc[:, 1]
for i in range(1, 5):
price_five.rename(columns={i: 'date'}, inplace=True)
tmp = pd.merge(
tmp, price_five.iloc[:, 2 * i:2 * i + 2], on='date', how='outer')
tmp = tmp.sort_values(axis=0, ascending=True,
by='date').reset_index(drop=True)
tmp = tmp.iloc[:len(source), :]
tmp = tmp.dropna(how='all')
data_date_tmp = list(tmp['date']).copy()
for i in range(0, len(data_date_tmp)):
if(type(data_date_tmp[i]) != type("aha")):
break
tempt = datetime.datetime.strptime(data_date_tmp[i], "%Y/%m/%d")
y = tempt.year
m = tempt.month
d = tempt.day
data_date_tmp[i] = y * 365 + m * 30 + d
tmp['trans'] = data_date_tmp
tmp = tmp.sort_values(axis=0, ascending=True,
by='trans').reset_index(drop=True)
tmp = tmp.iloc[:len(source), :6]
filter1 = tmp.set_index('date')
return filter1
# filter1.to_csv("filter1.csv")
# print(filter1)
# df1 = pd.DataFrame({'d': ['2018/1/1', np.nan,'2019/8/3'], 'd1': [1,2,np.nan]})
# df2 = pd.DataFrame({'d': ['2018/1/1', '2019/1/3'], 'd2': [1,3]})
# df=pd.merge(df1,df2, on='d', how='outer')
# df=df.sort_values(axis=0, ascending=True, by='d').reset_index(drop=True)
# print(df)
def seven(data):
#data = pd.read_csv("filter1.csv",header = 0,index_col=[0])
#print(data)
data_fund = | pd.read_csv("newfund.csv", header=0, encoding="UTF-8") | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, date_range, offsets
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
| tm.assert_frame_equal(shiftedFrame, shiftedFrame2) | pandas._testing.assert_frame_equal |
import numpy as np
import pytest
from pandas import (
DataFrame,
NaT,
Series,
Timedelta,
Timestamp,
)
import pandas._testing as tm
def test_group_shift_with_null_key():
# This test is designed to replicate the segfault in issue #13813.
n_rows = 1200
# Generate a moderately large dataframe with occasional missing
# values in column `B`, and then group by [`A`, `B`]. This should
# force `-1` in `labels` array of `g.grouper.group_info` exactly
# at those places, where the group-by key is partially missing.
df = DataFrame(
[(i % 12, i % 3 if i % 3 else np.nan, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i % 3 and i < n_rows - 12 else np.nan) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1)
tm.assert_frame_equal(result, expected)
def test_group_shift_with_fill_value():
# GH #24128
n_rows = 24
df = DataFrame(
[(i % 12, i % 3, i) for i in range(n_rows)],
dtype=float,
columns=["A", "B", "Z"],
index=None,
)
g = df.groupby(["A", "B"])
expected = DataFrame(
[(i + 12 if i < n_rows - 12 else 0) for i in range(n_rows)],
dtype=float,
columns=["Z"],
index=None,
)
result = g.shift(-1, fill_value=0)
tm.assert_frame_equal(result, expected)
def test_group_shift_lose_timezone():
# GH 30134
now_dt = Timestamp.utcnow()
df = DataFrame({"a": [1, 1], "date": now_dt})
result = df.groupby("a").shift(0).iloc[0]
expected = Series({"date": now_dt}, name=result.name)
tm.assert_series_equal(result, expected)
def test_group_diff_real_series(any_real_numpy_dtype):
df = DataFrame(
{"a": [1, 2, 3, 3, 2], "b": [1, 2, 3, 4, 5]},
dtype=any_real_numpy_dtype,
)
result = df.groupby("a")["b"].diff()
exp_dtype = "float"
if any_real_numpy_dtype in ["int8", "int16", "float32"]:
exp_dtype = "float32"
expected = Series([np.nan, np.nan, np.nan, 1.0, 3.0], dtype=exp_dtype, name="b")
tm.assert_series_equal(result, expected)
def test_group_diff_real_frame(any_real_numpy_dtype):
df = DataFrame(
{
"a": [1, 2, 3, 3, 2],
"b": [1, 2, 3, 4, 5],
"c": [1, 2, 3, 4, 6],
},
dtype=any_real_numpy_dtype,
)
result = df.groupby("a").diff()
exp_dtype = "float"
if any_real_numpy_dtype in ["int8", "int16", "float32"]:
exp_dtype = "float32"
expected = DataFrame(
{
"b": [np.nan, np.nan, np.nan, 1.0, 3.0],
"c": [np.nan, np.nan, np.nan, 1.0, 4.0],
},
dtype=exp_dtype,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[
Timestamp("2013-01-01"),
Timestamp("2013-01-02"),
Timestamp("2013-01-03"),
],
[Timedelta("5 days"), Timedelta("6 days"), Timedelta("7 days")],
],
)
def test_group_diff_datetimelike(data):
df = | DataFrame({"a": [1, 2, 2], "b": data}) | pandas.DataFrame |
from pandas import DataFrame, Index, Series
from numpy import ndarray
from lasso.dyna import Binout
from plotly.graph_objects import (
Figure,
Scatter,
Layout
)
class Extended_Binout(Binout):
def read(self, *args):
super().read.__doc__
# automatically sort returned lists for readability
if type(super().read(*args)) == list:
return sorted(super().read(*args))
else:
return super().read(*args)
def legend(self, db):
"""Legend as DataFrame
Parameters
----------
db : str
The database for the desired legend (e.g. 'matsum')
Returns
-------
DataFrame
Legend with ID and title pairs
"""
if 'legend' not in super().read(db):
raise ValueError(db + " has no legend")
legend = super().read(db, 'legend')
if 'legend_ids' in super().read(db):
id = 'legend_ids'
else:
id = 'ids'
df = DataFrame({
'id': super().read(db, id),
'title': [legend[i:i + 80].strip()
for i in range(0, len(legend), 80)]
})
return df
def as_df(self, *args) -> DataFrame:
"""Read data as a pandas DataFrame.
See docs for `Binout.read().
"""
data = super().read(*args)
# validate time-based data
if not isinstance(data, ndarray):
err_msg = "data is not a numpy array but has type '{0}'"
raise ValueError(err_msg.format(type(data)))
time_array = super().read(*args[:-1], 'time')
if data.shape[0] != time_array.shape[0]:
raise ValueError(
"data series length does not match time array length"
)
time_pdi = | Index(time_array, name='time') | pandas.Index |
from matplotlib.axes import Axes
from mpl_format.axes.axis_utils import new_axes
from mpl_format.text.text_utils import wrap_text
from nltk import RegexpTokenizer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize.api import TokenizerI
from pandas import Series, DataFrame, concat
from typing import List, Optional, Set, Union
from survey.mixins.data_mixins import ObjectDataMixin
from survey.mixins.data_types.textual_mixin import TextualMixin
from survey.mixins.named import NamedMixin
from survey.questions._abstract.question import Question
from survey.utils.nlp import pre_process_text_series
class FreeTextQuestion(NamedMixin, ObjectDataMixin, TextualMixin, Question):
"""
A Question with a Free-Text response.
"""
def __init__(self, name: str, text: str, data: Optional[Series] = None):
"""
Create a new FreeText Question.
:param name: A pythonic name for the question.
:param text: The text of the question.
:param data: Optional pandas Series of responses.
"""
self._set_name_and_text(name, text)
self.data = data
def _validate_data(self, data: Series):
pass
def plot_distribution(self, data: Optional[Series] = None,
transpose: bool = False,
top: int = 25,
title: bool = True,
x_label: bool = True, y_label: bool = True,
ax: Optional[Axes] = None) -> Axes:
"""
Plot the distribution of top words in answers to the Question.
:param data: The answers given by Respondents to the Question.
:param transpose: Whether to transpose the labels to the y-axis.
:param top: Number of most frequent words to plot.
:param title: Whether to add a title to the plot.
:param x_label: Whether to add a label to the x-axis.
:param y_label: Whether to add a label to the y-axis.
:param ax: Optional matplotlib axes to plot on.
"""
data = data if data is not None else self._data
if data is None:
raise ValueError('No data!')
words = pre_process_text_series(data)
value_counts = Series(words).value_counts()[:top]
plot_type = 'barh' if transpose else 'bar'
ax = ax or new_axes()
value_counts.index = wrap_text(value_counts.index)
value_counts.plot(kind=plot_type, ax=ax)
if title:
ax.set_title(self.text)
if transpose:
x_label_value = '# Respondents'
y_label_value = data.name
else:
x_label_value = data.name
y_label_value = '# Respondents'
if x_label:
ax.set_xlabel(x_label_value)
else:
ax.set_xlabel('')
if y_label:
ax.set_ylabel(y_label_value)
else:
ax.set_ylabel('')
return ax
@staticmethod
def word_counts(data: Series,
stop_words: Union[Set[str], str] = 'english',
tokenizer: Union[TokenizerI, str] = r'\w+',
lemmatizer=None) -> Series:
"""
Return a count of each word in the series of responses.
:param data: Series containing response texts.
:param stop_words: Set of stop words or language.
:param tokenizer: TokenizerI or string to pass to RegexpTokenizer.
:param lemmatizer: Optional Lemmatizer. Defaults to WordNetLemmatizer.
"""
if isinstance(stop_words, str):
stop_words = set(stopwords.words(stop_words))
if isinstance(tokenizer, str):
tokenizer = RegexpTokenizer(tokenizer)
if lemmatizer is None:
lemmatizer = WordNetLemmatizer()
def process(response: str) -> List[str]:
"""
Process a single string.
"""
words = tokenizer.tokenize(response.lower())
words = [w for w in words if w not in stop_words]
words = [lemmatizer.lemmatize(w) for w in words]
return words
processed = data.map(process)
word_counts = Series([
word for _, response in processed.iteritems()
for word in response
]).value_counts()
return word_counts
def distribution_table(
self, data: Optional[Series] = None,
top: int = 25,
) -> DataFrame:
"""
Return a table of the top words found in answers given to the Question.
:param data: Optional Series containing response texts.
:param top: Number of words to return counts for.
"""
data = data if data is not None else self._data
if data is None:
raise ValueError('No data!')
words = pre_process_text_series(data)
value_counts = Series(words).value_counts()[:top].rename('Count')
value_counts.index.name = 'Word'
word_counts = (
value_counts.reset_index()
.sort_values('Word')
.sort_values('Count', ascending=False)
.reset_index()
)
word_counts = word_counts.sort_values(
['Count', 'Word'], ascending=[False, True]
).reset_index()[['Word', 'Count']]
return word_counts
def stack(self, other: 'FreeTextQuestion',
name: Optional[str] = None,
text: Optional[str] = None) -> 'FreeTextQuestion':
if self.data.index.names != other.data.index.names:
raise ValueError('Indexes must have the same names.')
new_data = | concat([self.data, other.data]) | pandas.concat |
from math import pi
import numpy as np
import sklearn as sk
import scipy as sp
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from scipy.spatial import distance
import emm
def compute_probs(data, bins="auto"):
"""
Computes probability of data point falling in a given bins
Arguments:
data : pandas series or 2-d dataframe with "weights" column
bins : number of bins to use in histogram approximation
Returns : 1-d array of probabilities
"""
# If data is dataframe then get weights
if not isinstance(data, pd.Series):
args = {"weights": np.array(data["weights"])}
data = np.array(data.drop(columns=["weights"]), ndmin=0)
data = data.flatten()
else:
args = {}
data = np.array(data, ndmin=1)
# Get bins from data
bins = np.histogram_bin_edges(
data, bins=bins, range=(np.nanmin(data), np.nanmax(data))
)
# Calculate histogram
h, e = np.histogram(data, bins=bins, **args)
# Calculate probabilities
p = h / data.shape[0]
# Return bin edges and probs
return e, p
def support_intersection(p, q):
"""
Get overlapping parts of distribution
Arguments:
p : 1-d array of probabilites
q : 1-d array of probabilites
Returns:
sup_int : tuple of overlapping probabilities
"""
sup_int = list(filter(lambda x: (x[0] != 0) & (x[1] != 0), zip(p, q)))
return sup_int
def get_probs(list_of_tuples):
"""
Gets probabilties from tuples
Arguments:
list_of_tuples : list of tuples with overlapping probabilities
Returns:
p : 1-d array of probabilities
q : 1-d array of probabilities
"""
p = np.array([p[0] for p in list_of_tuples])
q = np.array([p[1] for p in list_of_tuples])
return p, q
def kl_divergence(p, q):
"""
Compute KL Divergence from two lists of probabilities from a distribution
Arguments:
p : 1-d array of probabilities
q : 1-d array of probabilities
Returns:
KL Divergence, D_KL(p || q)
"""
return np.sum(sp.special.kl_div(p, q))
def js_distance(p, q):
"""
Compute JS Distance from two lists of probabilities from a distribution
Arguments:
p : 1-d array of probabilities
q : 1-d array of probabilities
Returns:
JS Distance, D_JS(p || q)
"""
return distance.jensenshannon(p, q, base=2)
def compute_kl_divergence(original_sample, weighted_sample, bins=10):
"""
Computes the KL Divergence using the support
intersection between two different samples.
Arguments:
original_sample : 1-d array or dataframe with weights of samples
weighted_sample : 1-d array or dataframe with weights of samples
bins : number of bins to use in histogram
Returns:
KL Divergence of from two samples distributions
"""
e, p = compute_probs(original_sample, bins=bins)
_, q = compute_probs(weighted_sample, bins=e)
list_of_tuples = support_intersection(p, q)
p, q = get_probs(list_of_tuples)
return kl_divergence(p, q)
def compute_js_distance(target, weighted, bins="auto"):
"""
Computes the JS Distance using the support
intersection between two different samples.
Arguments:
target : 1-d array or dataframe with weights of samples
weighted : 1-d array or dataframe with weights of samples
bins : number of bins to use in histogram
Returns:
KL Divergence of from two samples distributions
"""
js_s = {}
weighted["weights"] = weighted["weights"] * weighted["Outcome"].nunique()
for outcome in target["Outcome"].unique():
total_js = {}
for feature in target.drop(columns="Outcome").columns:
e, p = compute_probs(
target[target["Outcome"] == outcome][feature], bins=bins
)
_, q = compute_probs(
weighted[weighted["Outcome"] == outcome][[feature, "weights"]], bins=e
)
total_js[feature] = js_distance(p, q)
js_s[outcome] = total_js
return js_s
def compare_model(
target_model,
weighted_model,
X_target_test,
y_target_test,
X_weighted_test,
y_weighted_test,
classifier="Unknown",
metrics=[sk.metrics.accuracy_score],
):
"""
Compares target ML model and weighted ML for given metrics
Arguments:
target_model : sklearn model or pipeline
weighted_model : sklearn model or pipeline
X_target_test : test feature data from target dataset
y_target_test : test label data from target dataset
X_weighted_test : test feature data from reweighted dataset
y_weighted_test : test label data from reweighted dataset
classifier : Name of classifier being tested
metrics : list of metrics to return scores for
Returns:
list with dictionary of scores
"""
if type(metrics) is not list:
metrics = [metrics]
scores = []
weights_test = X_weighted_test[:, 0]
for i, metric in enumerate(metrics):
if metric == sk.metrics.roc_auc_score:
RR_pred = target_model.predict_proba(X_target_test)[:, 1]
RS_pred = target_model.predict_proba(X_weighted_test)[:, 1]
SS_pred = weighted_model.predict_proba(X_weighted_test)[:, 1]
SR_pred = weighted_model.predict_proba(X_target_test)[:, 1]
else:
RR_pred = target_model.predict(X_target_test)
RS_pred = target_model.predict(X_weighted_test)
SS_pred = weighted_model.predict(X_weighted_test)
SR_pred = weighted_model.predict(X_target_test)
RR_score = metric(y_target_test, RR_pred)
RS_score = metric(y_weighted_test, RS_pred, sample_weight=weights_test)
SS_score = metric(y_weighted_test, SS_pred, sample_weight=weights_test)
SR_score = metric(y_target_test, SR_pred)
scores.append(
{
metric.__name__: type(classifier[0]).__name__,
"RR": RR_score,
"RS": RS_score,
"SS": SS_score,
"SR": SR_score,
}
)
return scores
def train_test_splits_create(target_data, weighted_data, test_size, label):
X_target = target_data.drop(columns=label)
X_target.insert(0, "weights", np.ones(X_target.shape[0]) / X_target.shape[0])
y_target = target_data[label]
X_target_train, X_target_test, y_target_train, y_target_test = train_test_split(
np.array(X_target),
np.array(y_target),
test_size=test_size,
)
# Training data from weighted corpus
X_weighted = weighted_data.drop(columns=label)
# shift column 'Name' to first position
first_column = X_weighted.pop("weights")
# insert column using insert(position,column_name,
# first_column) function
X_weighted.insert(0, "weights", first_column)
y_weighted = weighted_data[label]
(
X_weighted_train,
X_weighted_test,
y_weighted_train,
y_weighted_test,
) = train_test_split(
np.array(X_weighted), np.array(y_weighted), test_size=test_size
)
return (
X_target_train,
y_target_train,
X_target_test,
y_target_test,
X_weighted_train,
y_weighted_train,
X_weighted_test,
y_weighted_test,
)
def print_cv_results(best_clf, data_name=""):
print(
"{} data: the best parameters are given by \n {}".format(
data_name, best_clf.best_params_["classifier"]
)
+ "\n the best mean cross-validation accuracy {} +/- {}% on training dataset \n".format(
round(best_clf.best_score_ * 100, 5),
round(
best_clf.cv_results_["std_test_score"][best_clf.best_index_] * 100,
5,
),
)
)
# customized transformer
class WeightRemover(TransformerMixin, BaseEstimator):
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, y=None, **fit_params):
return X[:, 1:]
def classifier_metric(
target_data,
weighted_data,
param_grid,
test_size=0.2,
label="Outcome",
return_models=False,
**kwargs
):
(
X_target_train,
y_target_train,
X_target_test,
y_target_test,
X_weighted_train,
y_weighted_train,
X_weighted_test,
y_weighted_test,
) = train_test_splits_create(target_data, weighted_data, test_size, label)
scoring = kwargs.pop("scoring", sk.metrics.accuracy_score)
scoring = sk.metrics.get_scorer(scoring)
def weight_remover_scorer(estimator, X, y):
if scoring == sk.metrics.roc_auc_score:
print("test")
y_pred = estimator.predict_proba(X)[:, 1]
else:
y_pred = estimator.predict(X)
w = X[:, 0]
return scoring(y, y_pred, sample_weight=w)
classifier = param_grid["classifier"]
pipe_steps = kwargs.get("pipeline_steps", []).copy()
pipe_steps.extend(
[("remove_weight", WeightRemover()), ("classifier", classifier[0])]
)
pipe = Pipeline(pipe_steps)
cv = kwargs.get("cv", 5)
verbose = kwargs.get("verbose", False)
target_clf = sk.model_selection.GridSearchCV(
pipe,
param_grid=[param_grid],
cv=cv,
verbose=verbose,
n_jobs=-1,
scoring=weight_remover_scorer,
)
target_clf.fit(
X_target_train, y_target_train, classifier__sample_weight=X_target_train[:, 0]
)
pipe = Pipeline(pipe_steps)
weighted_clf = sk.model_selection.GridSearchCV(
pipe,
param_grid=[param_grid],
cv=cv,
verbose=verbose,
n_jobs=-1,
scoring=weight_remover_scorer,
)
weighted_clf.fit(
np.array(X_weighted_train),
y_weighted_train,
classifier__sample_weight=X_weighted_train[:, 0],
)
if verbose:
print_cv_results(target_clf, "Target")
print_cv_results(weighted_clf, "Weighted")
metrics = kwargs.pop("metrics", [sk.metrics.accuracy_score])
scores = compare_model(
target_clf.best_estimator_,
weighted_clf.best_estimator_,
X_target_test,
y_target_test,
X_weighted_test,
y_weighted_test,
classifier,
metrics=metrics,
)
if return_models:
return [target_clf, weighted_clf, scores]
return scores
def multiple_models(target, corpus, margs, param_grid, test_size=0.2, **kwargs):
rws = []
js = []
metrics = []
bins = kwargs.pop("bins", "auto")
if type(param_grid) != list:
param_grid = [param_grid]
if type(margs) != list:
margs = [margs]
for marg in margs:
rw = emm.reweighting.generate_synth(corpus, marg, **kwargs)
rws += [rw]
js += [compute_js_distance(target, rw, bins=bins)]
metric = []
for params in param_grid:
metric += [
classifier_metric(target, rw, params, test_size=test_size, **kwargs)
]
metrics += metric
if len(rws) == 1:
rws = rws[0]
if len(js) == 1:
js = js[0]
if len(metrics) == 1:
metrics = metrics[0]
return rws, js, metrics
if __name__ == "__main__":
import emm
import sklearn as sk
def weight_remover_scorer(estimator, X, y):
y_pred = estimator.predict(X)
w = X[:, -1]
return sk.metrics.accuracy_score(y, y_pred, sample_weight=w)
# Generate example data
m = 10000
# Target distribution
mu0 = np.array([-0.3])
sig0 = np.array([0.35])
mu1 = np.array([0.2])
sig1 = np.array([0.3])
rv0 = sp.stats.skewnorm(a=0, loc=mu0[0], scale=sig0[0])
rv1 = sp.stats.skewnorm(a=0, loc=mu1[0], scale=sig1[0])
X0 = rv0.rvs(size=m // 4)
X1 = rv1.rvs(size=m // 4)
y0 = np.zeros(m // 4)
y1 = np.ones(m // 4)
X = np.concatenate([X0, X1])
y = np.concatenate([y0, y1])
target = | pd.DataFrame({"feature": X}) | pandas.DataFrame |
"""
Copyright 2016 <NAME>, <NAME>, <NAME>, BlackRock Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import time
import multiprocess
import numpy as np
import pandas as pd
import cvxpy as cvx
from .returns import MultipleReturnsForecasts
from .result import SimulationResult
from .costs import BaseCost
# TODO update benchmark weights (?)
# Also could try jitting with numba.
class MarketSimulator():
logger = None
def __init__(self, market_returns, costs,
market_volumes=None, cash_key='cash'):
"""Provide market returns object and cost objects."""
self.market_returns = market_returns
if market_volumes is not None:
self.market_volumes = market_volumes[
market_volumes.columns.difference([cash_key])]
else:
self.market_volumes = None
self.costs = costs
for cost in self.costs:
assert (isinstance(cost, BaseCost))
self.cash_key = cash_key
def propagate(self, h, u, t):
"""Propagates the portfolio forward over time period t, given trades u.
Args:
h: pandas Series object describing current portfolio
u: n vector with the stock trades (not cash)
t: current time
Returns:
h_next: portfolio after returns propagation
u: trades vector with simulated cash balance
"""
assert (u.index.equals(h.index))
if self.market_volumes is not None:
# don't trade if volume is null
null_trades = self.market_volumes.columns[
self.market_volumes.loc[t] == 0]
if len(null_trades):
logging.info('No trade condition for stocks %s on %s' %
(null_trades, t))
u.loc[null_trades] = 0.
hplus = h + u
costs = [cost.value_expr(t, h_plus=hplus, u=u) for cost in self.costs]
for cost in costs:
assert(not | pd.isnull(cost) | pandas.isnull |
import pandas as pd
import os
class InputFileGenerator:
def __init__(self, path_to_file):
self.path_to_file = path_to_file
def __fill_in_missing_values(self, df):
list_of_years_to_calculate_for = sorted(
[name for name in list(df.columns) if name.isnumeric()]
)[1:-1]
for year in list_of_years_to_calculate_for:
next5 = str(int(year) + 5)
prev5 = str(int(year) - 5)
df[str(year)].fillna((df[next5] + df[prev5]) / 2.0, inplace=True)
def __drop_unwanted_years(self, df, wanted_start_year, wanted_end_year):
list_of_columns_to_erase = []
# Determine the column names (years) to erase
for column_name in list(df):
if column_name.isnumeric() and int(column_name) not in list(
range(wanted_start_year, wanted_end_year + 1)
):
list_of_columns_to_erase.append(column_name)
# Erase the unwanted column names (years)
df.drop(columns=list_of_columns_to_erase, inplace=True)
def get_dataframe_from_single_variable(self, variable):
variable_name = variable["variable_name"]
start_year = variable["start_year"]
end_year = variable["end_year"]
df = pd.read_csv(self.path_to_file)
final = df[df["Variable"] == variable_name]
self.__fill_in_missing_values(final)
self.__drop_unwanted_years(final, start_year, end_year)
return final
def __perform_calculation_on_scenario(self, dfgroup, variable):
result = {}
result["Model"] = list(dfgroup["Model"])[0]
result["Scenario"] = list(dfgroup["Scenario"])[0]
result["Scenario Group"] = list(dfgroup["Scenario Group"])[0]
result["Region"] = list(dfgroup["Region"])[0]
result["Variable"] = variable["variable_name"]
result["Unit"] = variable["unit"]
# Getting the list of years
list_of_years = list(dfgroup._get_numeric_data().columns)
# list of index in the dataframe
indeces = list(dfgroup.index)
# Determining index of numerator and denominator
denomitanotor_index = 0
numerator_index = 0
if dfgroup.loc[indeces[0]]["Variable"] == variable["denominator"]:
denomitanotor_index = indeces[0]
numerator_index = indeces[1]
else:
denomitanotor_index = indeces[1]
numerator_index = indeces[0]
# Looping through each year and performing the calculation
for year in list_of_years:
result[year] = (
dfgroup.loc[numerator_index][year]
/ dfgroup.loc[denomitanotor_index][year]
)
# Creating a dataframe from the dictionary
return | pd.DataFrame([result]) | pandas.DataFrame |
import os
import time
import shutil
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
from torch.nn.utils import clip_grad_norm_
import numpy as np
from config import parser
args = parser.parse_args()
import pickle
from network import Two_Stream_RNN
from dataloader import Face_Dataset, UtteranceRecord
from sklearn.metrics import mean_squared_error
from torch.autograd import Variable as Variable
import copy
from tqdm import tqdm
import glob
from Same_Length_Sampler import SameLengthBatchSampler
import pandas as pd
class My_loss(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
vx = x - torch.mean(x)
vy = y - torch.mean(y)
rho = torch.sum(vx * vy) / (torch.sqrt(torch.sum(torch.pow(vx, 2))) * torch.sqrt(torch.sum(torch.pow(vy, 2))))
x_m = torch.mean(x)
y_m = torch.mean(y)
x_s = torch.std(x)
y_s = torch.std(y)
ccc = 2*rho*x_s*y_s/(torch.pow(x_s, 2) + torch.pow(y_s, 2) + torch.pow(x_m - y_m, 2))
return -ccc
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def ccc(y_true, y_pred):
true_mean = np.mean(y_true)
pred_mean = np.mean(y_pred)
v_pred = y_pred - pred_mean
v_true = y_true - true_mean
rho = np.sum(v_pred*v_true) / (np.sqrt(np.sum(v_pred**2)) * np.sqrt(np.sum(v_true**2)))
std_predictions = np.std(y_pred)
std_gt = np.std(y_true)
ccc = 2 * rho * std_gt * std_predictions / (
std_predictions ** 2 + std_gt ** 2 +
(pred_mean - true_mean) ** 2)
return ccc, rho
def check_rootfolders():
"""Create log and model folder"""
folders_util = [args.root_log, args.root_model, args.root_output, args.root_tensorboard]
folders_util = ["%s/"%(args.save_root) +folder for folder in folders_util]
for folder in folders_util:
if not os.path.exists(folder):
print('creating folder ' + folder)
os.makedirs(folder)
def main():
root_path = args.root_path
label_name = args.label_name
if args.cnn == 'resnet50':
feature_root = '/media/newssd/OMG_experiments/Extracted_features/resnet50_ferplus_features_fps=30_pool5_7x7_s1'
elif args.cnn == 'vgg':
feature_root = '/media/newssd/OMG_experiments/Extracted_features/vgg_fer_features_fps=30_pool5'
if len(args.store_name)==0:
args.store_name = '_'.join( [label_name,
'cnn:{}'.format(args.cnn),
'loss_type:{}'.format(args.loss_type),
'batch_size:{}'.format(args.batch_size),
'cat_before_gru:{}'.format(args.cat_before_gru),
'freeze:{}'.format(args.freeze),
'fusion:{}'.format(args.fusion)])
if len(args.save_root)==0:
setattr(args, 'save_root', args.store_name)
else:
setattr(args, 'save_root', os.path.join(args.save_root, args.store_name))
print("save experiment to :{}".format(args.save_root))
check_rootfolders()
num_class = 1 if not "_" in args.label_name else 2
setattr(args, 'num_class', num_class)
if args.loss_type == 'mse':
criterion = nn.MSELoss().cuda()
elif args.loss_type=='ccc':
criterion = My_loss().cuda()
else: # another loss is mse or mae
raise ValueError("Unknown loss type")
L = args.length
train_dict = pickle.load(open(args.train_dict, 'rb'))
val_dict = pickle.load(open(args.val_dict, 'rb'))
train_dict.update(val_dict)
train_val_dict = copy.copy(train_dict)
video_names = sorted(list(train_dict.keys()))
np.random.seed(0)
video_indexes = np.random.permutation(len(video_names))
video_names = [video_names[i] for i in video_indexes]
if args.test:
run_5_fold_prediction_on_test_set(feature_root)
for i in range(5):
########################### Modify the classifier ###################
model = Two_Stream_RNN(mlp_hidden_units=args.hidden_units, phase_size=48, phase_channels=2*L,
phase_hidden_size=256, cat_before_gru=args.cat_before_gru, gru_hidden = 64, gru_num_layers=2, fusion=args.fusion)
########################### Modify the classifier ###################
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total Params: {}".format(pytorch_total_params))
phasenet_param = sum(p.numel() for p in model.phase_net.parameters() if p.requires_grad)
print("Temporal Stream params: {} ({:.2f})".format( phasenet_param, phasenet_param/float(pytorch_total_params)))
mlp_param = sum(p.numel() for p in model.mlp.parameters() if p.requires_grad)
print("Spatial Stream params: {} ({:.2f})".format( mlp_param, mlp_param/float(pytorch_total_params)))
model.cuda()
if args.cat_before_gru:
params_dict = [{'params': model.rnns.parameters(), 'lr':args.lr},
{'params': model.classifier.parameters(), 'lr':args.lr},
{'params': model.fusion_module.parameters(), 'lr':args.lr}]
else:
params_dict = [{'params': model.rnns_spatial.parameters(), 'lr':args.lr},
{'params': model.rnns_temporal.parameters(), 'lr':args.lr},
{'params': model.classifier.parameters(), 'lr':args.lr},
{'params': model.fusion_module.parameters(), 'lr':args.lr}]
if not args.freeze:
params_dict += [{'params': model.mlp.parameters(), 'lr':args.lr},
{'params': model.phase_net.parameters(), 'lr':args.lr}]
optimizer = torch.optim.SGD(params_dict, # do not set learn rate for mlp and
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
torch.cuda.empty_cache()
cudnn.benchmark = True
length = len(video_names)//5
# five fold cross validation
val_video_names = video_names[i*length:(i+1)*length]
if i==4:
val_video_names = video_names[i*length:]
train_video_names = [name for name in video_names if name not in val_video_names]
train_video_names = video_names # delete it later
train_dict = {key:train_val_dict[key] for key in train_video_names}
val_dict = {key:train_val_dict[key] for key in val_video_names}
train_dataset = Face_Dataset([os.path.join(root_path,'Train'), os.path.join(root_path,'Validation')], feature_root, train_dict, label_name, py_level=args.py_level,
py_nbands=args.py_nbands, sample_rate = args.sample_rate, num_phase=L, phase_size=48, test_mode=False,
return_phase=False)
val_dataset = Face_Dataset([os.path.join(root_path,'Train'), os.path.join(root_path,'Validation')], feature_root, val_dict, label_name, py_level=args.py_level,
py_nbands=args.py_nbands, sample_rate = args.sample_rate, num_phase=L, phase_size=48, test_mode=True,
return_phase=False)
train_batch_sampler = SameLengthBatchSampler(train_dataset.indices_list, batch_size=args.batch_size, drop_last=True)
val_batch_sampler = SameLengthBatchSampler(val_dataset.indices_list, batch_size = args.eval_batch_size, drop_last=True, random=False)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_sampler=train_batch_sampler,
num_workers=args.workers, pin_memory=False)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_sampler=val_batch_sampler,
num_workers=args.workers, pin_memory=False)
print("train dataset:{}".format(len(train_dataset)))
print("val dataset:{}".format(len(val_dataset)))
log = open(os.path.join(args.save_root, args.root_log, 'fold_{}.txt'.format(i)), 'w')
output = "\n Fold: {}\n".format(i)
log.write(output)
log.flush()
best_loss = 1000
best_ccc = -100
val_accum_epochs = 0
for epoch in range(args.epochs):
adjust_learning_rate(optimizer, epoch, args.lr_steps)
train_mean, train_std = train(train_loader, model, criterion, optimizer, epoch, log)
log_train_mean_std = open(os.path.join(args.save_root, args.root_log, 'mean_std_{}.txt'.format(i)), 'w')
log_train_mean_std.write("{} {}".format(train_mean, train_std))
log_train_mean_std.flush()
torch.cuda.empty_cache()
if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
loss_val, ccc_current_val = validate(val_loader, model, criterion, (epoch + 1) * len(train_loader), log, train_mean, train_std)
is_best_loss = loss_val< best_loss
best_loss = min(loss_val, best_loss)
is_best_ccc = ccc_current_val >best_ccc
best_ccc = max(ccc_current_val , best_ccc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
}, is_best_loss, is_best_ccc, filename='fold_{}'.format(i))
if not is_best_ccc:
val_accum_epochs+=1
else:
val_accum_epochs=0
if val_accum_epochs>=args.early_stop:
print("validation ccc did not improve over {} epochs, stop".format(args.early_stop))
break
run_5_fold_prediction_on_test_set(feature_root)
def run_5_fold_prediction_on_test_set(feature_root):
test_dataset = Face_Dataset(os.path.join(args.root_path,'Test'), feature_root, args.test_dict, args.label_name, py_level=args.py_level,
py_nbands=args.py_nbands, sample_rate = args.sample_rate, num_phase=args.num_phase, phase_size=48, test_mode=True,
return_phase=False)
print("test dataset:{}".format(len(test_dataset)))
test_batch_sampler = SameLengthBatchSampler(test_dataset.indices_list, batch_size = args.eval_batch_size, drop_last=False, random=False)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_sampler=test_batch_sampler,
num_workers=args.workers, pin_memory=False)
for i in range(5):
file = open(os.path.join(args.save_root, args.root_log, 'mean_std_{}.txt'.format(i)), 'r')
string = file.readline()
train_mean, train_std = string.split(" ")
train_mean = float(train_mean)
train_std = float(train_std)
# resume
model = Two_Stream_RNN(mlp_hidden_units=args.hidden_units, phase_size=48, phase_channels=2*args.num_phase, phase_hidden_size=256, cat_before_gru=args.cat_before_gru)
model.cuda()
saved_model_path = os.path.join(args.save_root, args.root_model, 'fold_{}_best_ccc.pth.tar'.format(i))
checkpoint = torch.load(saved_model_path)
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
print(("=> loading checkpoint '{}' epoch:{}".format(saved_model_path, start_epoch)))
preds, names = test(test_loader, model,train_mean=train_mean, train_std=train_std)
df= | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from .._utils import color_digits, color_background
from ..data import Data, DataSamples
#from ..woe import WOE
import pandas as pd
#import math as m
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import LinearSegmentedColormap
import seaborn as sns
from sklearn.model_selection import cross_val_score, StratifiedKFold, train_test_split, GridSearchCV, PredefinedSplit
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score, roc_curve, auc
#rom scipy.stats import chi2, chisquare, ks_2samp, ttest_ind
#import statsmodels.formula.api as sm
import warnings
from abc import ABCMeta, abstractmethod
#from sklearn.feature_selection import GenericUnivariateSelect, f_classif
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
import re
import ast
import os
import xlsxwriter
from PIL import Image
import datetime
from dateutil.relativedelta import *
import gc
#import weakref
import copy
import itertools
import calendar
#from ..cross import DecisionTree, Crosses
import networkx as nx
from operator import itemgetter
import matplotlib.ticker as mtick
try:
import fastcluster
except Exception:
print('For fullness analysis using hierarchical clustering please install fastcluster package.')
from scipy.cluster.hierarchy import fcluster
try:
import hdbscan
except Exception:
print('For fullness analysis using HDBSCAN clustering please install hdbscan package.')
from sklearn.cluster import KMeans
from sklearn.tree import export_graphviz
from os import system
from IPython.display import Image as Display_Image
#from joblib import Parallel, delayed
# Created by <NAME> and <NAME>
warnings.simplefilter('ignore')
plt.rc('font', family='Verdana')
plt.style.use('seaborn-darkgrid')
pd.set_option('display.precision', 3)
class Processor(metaclass = ABCMeta):
"""
Base class for processing objects of Data class
"""
@abstractmethod
def __init__(self):
'''
self.stats is a DataFrame with statistics about self.work()
'''
self.stats = pd.DataFrame()
@abstractmethod
def work(self, data, parameters):
pass
def param_dict_to_stats(self, data, d):
'''
TECH
Transforms a dict of parameters to self.stats
Parameters
-----------
data: Data object being processed
d: dictionary {action : list_of_features} where action is a string with action description and list_of_features is a list of features' names to apply the action to
'''
col1 = []
col2 = []
for (how, features) in d.items():
col1 = col1 + [how + ' (' + str(round(data.dataframe[features[i]].mean(), 3)) + ')' if how == 'mean' else how for i in range(len(features))]
col2 = col2 + features
self.stats = pd.DataFrame({'action' : col1, 'features': col2})
#---------------------------------------------------------------
class MissingProcessor(Processor):
'''
Class for missing values processing
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, parameters, quantiles=100, precision=4):
'''
Deals with missing values
Parameters:
-----------
data: an object of Data type that should be processed
inplace: whether to change the data or to create a new Data object
parameters: {how_to_process : features_to_process}
how_to_process takes:
'delete' - to delete samples where the value of any feature from features_to_process is missing
'mean' - for each feature from features_to_process to fill missings with the mean value
'distribution' - for each feature from features_to_process to fill missings according to non-missing distribution
a value - for each feature from features_to_process to fill missings with this value
features_to_process takes list of features from data
quantiles: number of quantiles for 'distribution' type of missing process - all values are divided into quantiles,
then missing values are filled with average values of quantiles. If number of unique values is less then number of quantiles
or field type is not int, float, etc, then no quantiles are calculated - missings are filled with existing values according
to their frequency
precision: precision for quantile edges and average quantile values
Returns:
----------
A copy of data with missings processed for features mentioned in parameters
'''
for how in parameters:
if isinstance(parameters[how], str):
parameters[how] = [parameters[how]]
result = data.dataframe.copy()
for how in parameters:
if how == 'delete':
for feature in parameters[how]:
result = result[result[feature].isnull() == False]
if data.features != None and feature in data.features:
data.features.remove(feature)
elif how == 'mean':
for feature in parameters[how]:
result[feature].fillna(result[feature].mean(), inplace = True)
elif how == 'distribution':
for feature in parameters[how]:
if data.dataframe[feature].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[feature].unique().shape[0]<quantiles:
summarized=data.dataframe[[feature]].dropna().groupby(feature).size()
summarized=summarized.reset_index().rename({feature:'mean', 0:'size'}, axis=1)
else:
summarized=data.dataframe[[feature]].rename({feature:'value'}, axis=1).join(pd.qcut(data.dataframe[feature].dropna(), q=quantiles, precision=4, duplicates='drop')).groupby(feature).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index(drop=True)
#summarized=summarized.reset_index()
summarized['p']=summarized['size']/summarized['size'].sum()
result[feature]=result[feature].apply(lambda x: np.random.choice(summarized['mean'].round(precision), p=summarized['p']) if pd.isnull(x) else x)
else:
result[parameters[how]] = result[parameters[how]].fillna(how)
# statistics added on Dec-04-2018
self.param_dict_to_stats(data, parameters)
return Data(result, data.target, data.features, data.weights, data.name)
#---------------------------------------------------------------
class StabilityAnalyzer(Processor):
'''
For stability analysis
'''
def __init__(self):
self.stats = pd.DataFrame({'sample_name' : [], 'parameter' : [], 'meaning': []})
def work(self, data, time_column, sample_name = None, psi = None, event_rate=None, normalized=True, date_format = "%d.%m.%Y", time_func = (lambda x: 100*x.year + x.month),
yellow_zone = 0.1, red_zone = 0.25, figsize = None, out = True, out_images = 'StabilityAnalyzer/', sep=';', base_period_index=0):
'''
Calculates the dynamic of feature (or groups of values) changes over time so it should be used only for discrete or WOE-transformed
features. There are 2 types of analysis:
PSI. Represents a heatmap (Stability Table) of features stability that contains 3 main zones: green (the feature is
stable), yellow (the feature is not very stable) and red (the feature is unstable). StabilityIndex (PSI) is calculated for each
time period relatively to the first period.
Stability index algorithm:
For each feature value and time period number of samples is calculated: e.g., N[i, t] is number of samples for value i and time period t.
StabilityIndex[t] = (N[i, t]/sum_i(N[i, t]) - (N[i, 0]/sum_i(N[i, 0])))* log(N[i, t]/sum_i(N[i, t])/(N[i, 0]/sum_i(N[i, 0])))
ER (event rate). Calculates average event rate and number of observations for each feature's value over time.
After calculation displays the Stability Table (a heatmap with stability indexes for each feature value and time period)
and Event rate graphs
Parameters:
-----------
data: data to analyze (type Data)
time_column: name of a column with time values to calculate time periods
sample_name: name of sample for report
psi: list of features for PSI analysis (if None then all features from the input Data object will be used)
event_rate: list of features for event rate and distribution in time analysis (if None then all features from the input Data object will be used)
date_format: format of time values in time_column. Codes for format:
%a Weekday as locale’s abbreviated name. Sun, Mon, …, Sat (en_US)
%A Weekday as locale’s full name. Sunday, Monday, …, Saturday (en_US)
%w Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 0, 1, …, 6
%d Day of the month as a zero-padded decimal number. 01, 02, …, 31
%b Month as locale’s abbreviated name. Jan, Feb, …, Dec (en_US)
%B Month as locale’s full name. January, February, …, December (en_US)
%m Month as a zero-padded decimal number. 01, 02, …, 12
%y Year without century as a zero-padded decimal number. 00, 01, …, 99
%Y Year with century as a decimal number. 1970, 1988, 2001, 2013
%H Hour (24-hour clock) as a zero-padded decimal number. 00, 01, …, 23
%I Hour (12-hour clock) as a zero-padded decimal number. 01, 02, …, 12
%p Locale’s equivalent of either AM or PM. AM, PM (en_US)
%M Minute as a zero-padded decimal number. 00, 01, …, 59
%S Second as a zero-padded decimal number. 00, 01, …, 59
%f Microsecond as a decimal number, zero-padded on the left. 000000, 000001, …, 999999
%z UTC offset in the form +HHMM or -HHMM (empty string if the the
object is naive). (empty), +0000, -0400, +1030
%Z Time zone name (empty string if the object is naive). (empty), UTC, EST, CST
%j Day of the year as a zero-padded decimal number. 001, 002, …, 366
%U Week number of the year (Sunday as the first day of the week)
as a zero padded decimal number. All days in a new year preceding
the first Sunday are considered to be in week 0. 00, 01, …, 53 (6)
%W Week number of the year (Monday as the first day of the week) as
a decimal number. All days in a new year preceding the first
Monday are considered to be in week 0. 00, 01, …, 53 (6)
%c Locale’s appropriate date and time representation. Tue Aug 16 21:30:00 1988 (en_US)
%x Locale’s appropriate date representation. 08/16/88 (None); 08/16/1988 (en_US)
%X Locale’s appropriate time representation. 21:30:00 (en_US)
time_func: function for time_column parsing (changes date to some value, representing time period) or
a period type for dt.to_period() function. Codes for available periods:
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
CBM custom business month end frequency
MS month start frequency
BMS business month start frequency
CBMS custom business month start frequency
Q quarter end frequency
BQ business quarter endfrequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
BH business hour frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
yellow_zone: the lower border for the yellow stability zone ('not very stable') in percents of derivation
red_zone: the lower border for the red stability zone ('unstable') in percents of derivation
figsize: matplotlib figsize of the Stability Table
out: a boolean for image output or a path for xlsx output file to export the Stability Tables
out_images: a path for image output (default - StabilityAnalyzer/)
sep: the separator to be used in case of csv export
base_period_index: index of period (starting from 0) for other periods to compare with (0 for the first, -1 for the last)
'''
print('Warning: only for discrete features!!!')
if sample_name is None:
if pd.isnull(data.name):
sample_name = 'sample'
else:
sample_name = data.name
out_images = out_images + sample_name + '/'
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out'], 'meaning' : [out]}))
self.stats = self.stats.append(pd.DataFrame({'sample_name' : [sample_name], 'parameter' : ['out_images'], 'meaning' : [out_images]}))
psi = data.features.copy() if psi is None else [x for x in psi if x in data.features]
event_rate = data.features.copy() if event_rate is None else [x for x in event_rate if x in data.features]
all_features=list(set(psi+event_rate))
if figsize is None:
figsize=(12, max(1,round(len(psi)/2,0)))
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
writer = pd.ExcelWriter(out, engine='openpyxl')
tmp_dataset = data.dataframe[all_features + [time_column, data.target] + ([] if data.weights is None else [data.weights])].copy()
tmp_dataset[time_column] = pd.to_datetime(tmp_dataset[time_column], format=date_format, errors='coerce')
if callable(time_func):
tmp_dataset['tt'] = tmp_dataset[time_column].map(time_func)
elif isinstance(time_func, str):
try:
tmp_dataset['tt'] = tmp_dataset[time_column].dt.to_period(time_func).astype(str)
except Exception:
print('No function or correct period code was provided. Return None.')
return None
c = 0
for feature in sorted(all_features):
print (feature)
if data.weights is not None:
feature_stats=tmp_dataset[[feature, 'tt', data.target, data.weights]]
feature_stats['---weight---']=feature_stats[data.weights]
else:
feature_stats=tmp_dataset[[feature, 'tt', data.target]]
feature_stats['---weight---']=1
feature_stats[data.target]=feature_stats[data.target]*feature_stats['---weight---']
feature_stats=feature_stats[[feature, 'tt', data.target, '---weight---']].groupby([feature, 'tt'], as_index=False).\
agg({'---weight---':'size', data.target:'mean'}).rename({feature:'value', '---weight---':'size', data.target:'mean'}, axis=1)
feature_stats['feature']=feature
if c == 0:
all_stats = feature_stats
c = c+1
else:
all_stats = all_stats.append(feature_stats, ignore_index=True)
all_stats['size']=all_stats['size'].astype(float)
all_stats['mean']=all_stats['mean'].astype(float)
if len(psi)>0:
stability1=all_stats[all_stats.feature.isin(psi)][['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
stability1.columns.name=None
#display(stability1)
dates = stability1.drop(['feature', 'value'], 1).columns.copy()
stability2 = stability1[['feature', 'value']].copy()
for date in dates:
stability2[date] = list(stability1[date]/list(stability1.drop(['value'], 1).groupby(by = 'feature').sum()[date][:1])[0])
#display(stability2)
start_date = dates[base_period_index]
stability3 = stability2[['feature', 'value']]
for date in dates:
stability3[date] = round(((stability2[date]-stability2[start_date])*np.log(stability2[date]/stability2[start_date])).fillna(0), 2).replace([])
#display(stability3)
stability4 = stability3.drop(['value'], 1).groupby(by = 'feature').sum()
#display(stability4)
fig, ax = plt.subplots(figsize = figsize)
ax.set_facecolor("red")
sns.heatmap(stability4, ax=ax, yticklabels=stability4.index, annot = True, cmap = 'RdYlGn_r', center = yellow_zone, vmax = red_zone, linewidths = .05, xticklabels = True)
if out==True or isinstance(out, str):
plt.savefig(out_images+"stability.png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
stability4.style.apply(color_background,
mn=0, mx=red_zone, cntr=yellow_zone).to_excel(writer, engine='openpyxl', sheet_name='PSI')
worksheet = writer.sheets['PSI']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['B2']
else:
print('Unknown or unacceptable format for export several tables. Use .xlsx. Skipping export.')
if len(event_rate)>0:
for_event_rate=all_stats[all_stats['feature'].isin(event_rate)]
date_base=pd.DataFrame(all_stats['tt'].unique(), columns=['tt']).sort_values('tt')
for feature in sorted(for_event_rate['feature'].unique()):
cur_feature_data=for_event_rate[for_event_rate['feature']==feature].copy()
#display(cur_feature_data)
if normalized:
for tt in sorted(cur_feature_data['tt'].unique(), reverse=True):
cur_feature_data.loc[cur_feature_data['tt']==tt, 'percent']=cur_feature_data[cur_feature_data['tt']==tt]['size']/cur_feature_data[cur_feature_data['tt']==tt]['size'].sum()
#display(cur_feature_data)
fig, ax = plt.subplots(1,1, figsize=(15, 5))
ax2 = ax.twinx()
ax.grid(False)
ax2.grid(False)
sorted_values=sorted(cur_feature_data['value'].unique(), reverse=True)
for value in sorted_values:
to_visualize='percent' if normalized else 'size'
value_filter = (cur_feature_data['value']==value)
er=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')['mean']
height=date_base.merge(cur_feature_data[value_filter], on='tt', how='left')[to_visualize].fillna(0)
bottom=date_base.merge(cur_feature_data[['tt',to_visualize]][cur_feature_data['value']>value].groupby('tt', as_index=False).sum(), on='tt', how='left')[to_visualize].fillna(0)
ax.bar(range(date_base.shape[0]), height, bottom=bottom if value!=sorted_values[0] else None, edgecolor='white', alpha=0.3)
ax2.plot(range(date_base.shape[0]), er, label=str(round(value,3)), linewidth=2)
plt.xticks(range(date_base.shape[0]), date_base['tt'])
fig.autofmt_xdate()
ax2.set_ylabel('Event Rate')
ax2.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
if normalized:
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: '{:.2%}'.format(y)))
ax2.annotate('Obs:', xy=(0, 1), xycoords=('axes fraction', 'axes fraction'), xytext=(-25, 5), textcoords='offset pixels', color='blue', size=11)
for i in range(date_base.shape[0]):
ax2.annotate(str(int(cur_feature_data[cur_feature_data['tt']==date_base['tt'][i]]['size'].sum())),
xy=(i, 1),
xycoords=('data', 'axes fraction'),
xytext=(0, 5),
textcoords='offset pixels',
#rotation=60,
ha='center',
#va='bottom',
color='blue',
size=11)
ax.set_ylabel('Total obs')
plt.xlabel(time_column)
plt.suptitle(feature + ' event rate in time' if callable(time_func) else feature + ' event rate in time, period = '+time_func)
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles[::-1], labels[::-1], loc=0, fancybox=True, framealpha=0.3)
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if isinstance(out, str):
if out[-4:]=='.xls' or out[-5:]=='.xlsx':
event_rate_df=all_stats[['feature', 'value', 'tt', 'mean']].pivot_table(values='mean', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
event_rate_df.columns.name=None
event_rate_df.style.apply(color_background,
mn=0, mx=all_stats['mean'].mean()+2*all_stats['mean'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn_r, subset=pd.IndexSlice[:, [x for x in event_rate_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Event Rate', index=False)
worksheet = writer.sheets['Event Rate']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
if x[0].column!='B':
for cell in worksheet[x[0].column]:
if cell.row!=1:
cell.number_format = '0.000%'
worksheet.freeze_panes = worksheet['C2']
size_df=all_stats[['feature', 'value', 'tt', 'size']].pivot_table(values='size', columns='tt', index=['feature', 'value']).reset_index().fillna(0)
size_df.columns.name=None
size_df.style.apply(color_background,
mn=0, mx=all_stats['size'].mean()+2*all_stats['size'].std(), cntr=None,
cmap=matplotlib.cm.RdYlGn, subset=pd.IndexSlice[:, [x for x in size_df.columns if x not in ['value','feature']]]).to_excel(writer, engine='openpyxl', sheet_name='Observations', index=False)
worksheet = writer.sheets['Observations']
for x in worksheet.columns:
if x[0].column=='A':
worksheet.column_dimensions[x[0].column].width = 40
else:
worksheet.column_dimensions[x[0].column].width = 12
worksheet.freeze_panes = worksheet['C2']
else:
print('Unknown or unacceptable format for export several tables. Use .xlsx. Skipping export.')
if isinstance(out, str):
writer.close()
#---------------------------------------------------------------
class DataVisualizer(Processor):
'''
Supports different types of data visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, distribution = True, factorplot = True, factorplot_separate = False, pairplot = None,
out=False, out_images='DataVisualizer/', plot_cells=20, categorical=None):
'''
Produces distribution plot, factorplot, pairplot
Parameters:
-----------
data: data to visualize
distribution: parameter for a distribution plot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use distribution plot
factorplot: parameter for a factorplot,
if True - plot for data.features, if list - plot for features from the list, if False - do not use factorplot
factorplot_separate: if True then separate plots for each target value
pairplot: list of features to make a pairplot for
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - DataVisualizer/)
plot_cells: how many cells would plots get in output excel
categorical: a list of features to be treated as categorical (countplots will be produced instead of distplots)
'''
if pairplot is None:
pairplot=[]
if categorical is None:
categorical=[]
dataframe_t = data.dataframe[data.features + [data.target]].copy()
data = Data(dataframe_t, features = data.features, target = data.target)
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Data Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_plot_number=0
if distribution:
print ('Distributions of features: ')
if type(distribution) == type([1, 1]):
features = distribution
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
for feature in features:
current_plot_number=current_plot_number+1
if data.dataframe[feature].dtype==object or feature in categorical:
f, axes = plt.subplots()
sns.countplot(data.dataframe[feature].dropna())
f.autofmt_xdate()
else:
sns.distplot(data.dataframe[feature].dropna())
if data.dataframe[feature].isnull().any():
plt.title(feature+' (miss = ' + str(round(data.dataframe[feature].isnull().value_counts()[True]/data.dataframe.shape[0],3))+')')
else:
plt.title(feature+' (miss = 0)')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_d.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_d.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Distribution plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_d.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if factorplot:
print ('Factorplot: ')
if type(factorplot) == type([1, 1]):
features = factorplot
else:
if data.features == None:
print ('No features claimed. Please set data.features = ')
return None
features = data.features
if factorplot_separate:
for feature in features:
current_plot_number=current_plot_number+1
# edited 21-Jun-2018 by <NAME>
f, axes = plt.subplots(data.dataframe[data.target].drop_duplicates().shape[0], 1, figsize=(4, 4), sharex=True)
f.autofmt_xdate()
#for target_v in data.dataframe[data.target].drop_duplicates():
targets = list(data.dataframe[data.target].drop_duplicates())
for target_i in range(len(targets)):
if data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().any():
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = ' + str(round(data.dataframe[data.dataframe[data.target]==targets[target_i]][feature].isnull().value_counts()[True]/data.dataframe[data.dataframe[data.target]==targets[target_i]].shape[0],3))
else:
x_label=feature + ': ' + data.target + ' = ' + str(targets[target_i]) + ', miss = 0'
if data.dataframe[feature].dtype==object or feature in categorical:
ax=sns.countplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i], color = 'm')
ax.set(xlabel=x_label)
else:
sns.distplot(data.dataframe[data.dataframe[data.target] == targets[target_i]][feature].dropna(),
ax=axes[target_i],
axlabel=x_label, color = 'm')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
else:
for feature in features:
current_plot_number=current_plot_number+1
sns.factorplot(x=feature, hue = data.target, data = data.dataframe, kind='count', palette = 'Set1')
if out==True or isinstance(out, str):
plt.savefig(out_images+feature+"_f.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+feature+"_f.png").size[1]
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Factor plot for '+feature+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+feature+"_f.png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
print ('---------------------------------------\n')
if pairplot != []:
current_plot_number=current_plot_number+1
print ('Pairplot')
sns.pairplot(data.dataframe[pairplot].dropna())
if out==True or isinstance(out, str):
plt.savefig(out_images+"pairplot.png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
worksheet.write((current_plot_number-1)*(plot_cells+1), 0, 'Pair plot for '+str(pairplot)+":")
worksheet.insert_image((current_plot_number-1)*(plot_cells+1)+1, 0, out_images+"pairplot.png")
plt.show()
if isinstance(out, str):
workbook.close()
#---------------------------------------------------------------
class TargetTrendVisualizer(Processor):
'''
Supports target trend visualization
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, features=None, quantiles=100, magnify_trend=False, magnify_std_number=2, hide_every_even_tick_from=50,
min_size=10, out=False, out_images='TargetTrendVisualizer/', plot_cells=20):
'''
Calculates specified quantiles/takes categories, calculates target rates and sizes, then draws target trends
Parameters:
-----------
data: an object of Data type
features: the list of features to visualize, can be omitted
quantiles: number of quantiles to cut feature values on
magnify_trend: if True, then axis scale for target rate will be corrected to exclude outliers
magnify_std_number: how many standard deviations should be included in magnified scale
hide_every_even_tick_from: if there is too many quantiles then every second tick on x axis will be hidden
out: a boolean for images output or a path for xlsx output file
out_images: a path for images output (default - TargetTrendVisualizer/)
plot_cells: how many cells would plots get in output excel
'''
if features is None:
cycle_features=data.features.copy()
else:
cycle_features=features.copy()
if out is not None:
if out==True or isinstance(out, str):
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if isinstance(out, str):
# Create an new Excel file and add a worksheet.
workbook = xlsxwriter.Workbook(out)
worksheet = workbook.add_worksheet('Target Trend Visualization')
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 100)
current_feature_number=0
for f in cycle_features:
if f not in data.dataframe:
print('Feature', f, 'not in input dataframe. Skipping..')
else:
print('Processing', f,'..')
current_feature_number=current_feature_number+1
if data.dataframe[f].dtype not in (float, np.float32, np.float64, int, np.int32, np.int64) or data.dataframe[f].unique().shape[0]<quantiles:
summarized=data.dataframe[[f, data.target]].groupby([f]).agg(['mean', 'size'])
else:
if data.dataframe[f].dropna().shape[0]<min_size*quantiles:
current_quantiles=int(data.dataframe[f].dropna().shape[0]/min_size)
if current_quantiles==0:
print('The number of non-missing observations is less then', min_size,'. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'The number of non-missing observations is less then '+str(min_size)+'. No trend to visualize.')
continue
else:
print('Too few non-missing observations for', quantiles, 'quantiles. Calculating', current_quantiles, 'quantiles..')
else:
current_quantiles=quantiles
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
small_quantiles=summarized[data.target][summarized[data.target]['size']<min_size]['size']
#display(small_quantiles)
if small_quantiles.shape[0]>0:
current_quantiles=int(small_quantiles.sum()/min_size)+summarized[data.target][summarized[data.target]['size']>=min_size].shape[0]
print('There are quantiles with size less then', min_size,'. Attempting', current_quantiles, 'quantiles..')
summarized=data.dataframe[[data.target]].join(pd.qcut(data.dataframe[f], q=current_quantiles, precision=4, duplicates='drop')).groupby([f]).agg(['mean', 'size'])
summarized.columns=summarized.columns.droplevel()
summarized=summarized.reset_index()
if pd.isnull(data.dataframe[f]).any():
with_na=data.dataframe[[f,data.target]][pd.isnull(data.dataframe[f])]
summarized.loc[-1]=[np.nan, with_na[data.target].mean(), with_na.shape[0]]
summarized=summarized.sort_index().reset_index(drop=True)
if summarized.shape[0]==1:
print('Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
if isinstance(out, str):
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.write((current_feature_number-1)*(plot_cells+1)+1, 0, 'Too many observations in one value, so only 1 quantile was created. Increasing quantile number is recommended. No trend to visualize.')
continue
fig = plt.figure(figsize=(15,5))
ax = fig.add_subplot(111)
ax.set_ylabel('Observations')
# blue is for the distribution
if summarized.shape[0]>hide_every_even_tick_from:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=60, ha="right")
xticks = ax.xaxis.get_major_ticks()
for i in range(len(xticks)):
if i%2==0:
xticks[i].label1.set_visible(False)
else:
plt.xticks(range(summarized.shape[0]), summarized[f].astype(str), rotation=45, ha="right")
ax.bar(range(summarized.shape[0]), summarized['size'], zorder=0, alpha=0.3)
ax.grid(False)
ax.grid(axis='y', zorder=1, alpha=0.6)
ax2 = ax.twinx()
ax2.set_ylabel('Target Rate')
ax2.grid(False)
#display(summarized)
if magnify_trend:
ax2.set_ylim([0, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))])
for i in range(len(summarized['mean'])):
if summarized['mean'][i]>np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size'])):
ax2.annotate(str(round(summarized['mean'][i],4)),
xy=(i, np.average(summarized['mean'], weights=summarized['size'])+magnify_std_number*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
xytext=(i, np.average(summarized['mean'], weights=summarized['size'])+(magnify_std_number+0.05)*np.sqrt(np.cov(summarized['mean'], aweights=summarized['size']))),
rotation=60,
ha='left',
va='bottom',
color='red',
size=8.5
)
# red is for the target rate values
ax2.plot(range(summarized.shape[0]), summarized['mean'], 'ro-', linewidth=2.0, zorder=4)
if out==True or isinstance(out, str):
plt.savefig(out_images+f+".png", dpi=100, bbox_inches='tight')
if isinstance(out, str):
scale=(20*plot_cells)/Image.open(out_images+f+".png").size[1]
worksheet.write((current_feature_number-1)*(plot_cells+1), 0, 'Target trend for '+f+":")
worksheet.insert_image((current_feature_number-1)*(plot_cells+1)+1, 0, out_images+f+".png",
{'x_scale': scale, 'y_scale': scale})
plt.show()
if isinstance(out, str):
workbook.close()
class CorrelationAnalyzer(Processor):
'''
Produces correlation analysis
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = True, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
drop_with_most_correlations=True, verbose=False, out_before=None, out_after=None, sep=';', cdict = None):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features.
For each highly correlated pair the algorithm chooses the less significant feature and adds it to the delete list.
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples, train sample will be checked)
drop_features: permission to delete correlated features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
drop_with_most_correlations: should the features with the highest number of correlations be excluded first (otherwise just with any number of correlations and the lowest gini)
verbose: flag for detailed output
out_before: file name for export of correlation table before feature exclusion (.csv and .xlsx types are supported)
out_after: file name for export of correlation table after feature exclusion (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
--------
Resulting Data or DataSamples object and the correlation table
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'method' : [method], 'out_before' : out_before, 'out_after' : out_after})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features == [] or features is None:
candidates = sample.features.copy()
else:
candidates = features.copy()
features_to_drop = []
correlations = sample.dataframe[candidates].corr(method = method)
cor_out=correlations.copy()
if cdict is None:
cdict = {'red' : ((0.0, 0.9, 0.9),
(0.5, 0.05, 0.05),
(1.0, 0.9, 0.9)),
'green': ((0.0, 0.0, 0.0),
(0.5, 0.8, 0.8),
(1.0, 0.0, 0.0)),
'blue' : ((0.0, 0.1, 0.1),
(0.5, 0.1, 0.1),
(1.0, 0.1, 0.1))}
#edited 21.08.2018 by <NAME> - added verbose variant, optimized feature dropping
# edited on Dec-06-18 by <NAME>: added png
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
if out_before is not None:
out_before_png = 'corr_before.png'
if out_before[-4:]=='.csv':
draw_corr.round(2).to_csv(out_before, sep = sep)
out_before_png = out_before[:-4] + '.png'
elif out_before[-5:]=='.xlsx' or out_before[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_before, engine='openpyxl', sheet_name='Correlation (before)')
out_before_png = out_before[:-5] + '.png' if out_before[-5:]=='.xlsx' else out_before[:-4] + '.png'
elif out_before[-4:]=='.png':
out_before_png = out_before
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
fig_before = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_before.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_before.savefig(out_before_png, bbox_inches='tight')
plt.close()
self.stats['out_before'] = out_before_png
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
to_check_correlation=True
while to_check_correlation:
to_check_correlation=False
corr_number={}
significantly_correlated={}
for var in correlations:
var_corr=correlations[var].apply(lambda x: abs(x))
var_corr=var_corr[(var_corr.index!=var) & (var_corr>threshold)].sort_values(ascending=False).copy()
corr_number[var]=var_corr.shape[0]
significantly_correlated[var]=str(var_corr.index.tolist())
if drop_with_most_correlations:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]==max({x:corr_number[x] for x in corr_number if x not in features_to_leave}.values()) and corr_number[x]>0 and x not in features_to_leave}
else:
with_correlation={x:sample.ginis[x] for x in corr_number if corr_number[x]>0 and x not in features_to_leave}
if len(with_correlation)>0:
feature_to_drop=min(with_correlation, key=with_correlation.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high correlation with features: %(f)s (Gini=%(g)0.2f)' % {'v':feature_to_drop, 'f':significantly_correlated[feature_to_drop], 'g':with_correlation[feature_to_drop]})
correlations=correlations.drop(feature_to_drop,axis=1).drop(feature_to_drop,axis=0).copy()
to_check_correlation=True
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
out_after_png = 'corr_after.png'
if out_after is not None:
if out_after[-4:]=='.csv':
draw_corr.round(2).to_csv(out_after, sep = sep)
out_after_png = out_after[:-4] + '.png'
elif out_after[-5:]=='.xlsx' or out_after[-4:]=='.xls':
draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2).to_excel(out_after, engine='openpyxl', sheet_name='Correlation (after)')
out_after_png = out_after[:-5] + '.png' if out_after[-5:]=='.xlsx' else out_after[:-4] + '.png'
elif out_after[-4:]=='.png':
out_after_png = out_after
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
#sns.heatmap(draw_corr.round(2), annot = True, cmap = 'RdBu_r', cbar = False, center = 0).figure.savefig(out_after_png, bbox_inches='tight')
fig_after = sns.heatmap(draw_corr.round(2), annot = True, cmap = LinearSegmentedColormap('mycmap', cdict), cbar = False, center = 0, yticklabels = True, xticklabels = True).figure
fig_after.set_size_inches(draw_corr.shape[0]/2, draw_corr.shape[0]/2)
fig_after.savefig(out_after_png, bbox_inches='tight')
plt.close()
if verbose:
display(draw_corr.round(2).style.applymap(color_digits, threshold_red=threshold, threshold_yellow=threshold**2))
self.stats['out_after'] = out_after_png
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
if verbose:
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, cor_out
def find_correlated_groups(self, data, features = None, features_to_leave = None, threshold=0.6, method = 'spearman',
verbose=False, figsize=(12,12), corr_graph_type='connected'):
'''
Calculates the covariance matrix and correlation coefficients for each pair of features and
returns groups of significantly correlated features
Parameters
-----------
data: a Data or DataSamples object to check (in case of DataSamples it's train sample will be checked)
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be included in analysis
threshold: the lowest value of a correlation coefficient for two features to be considered correlated
method: method for correlation calculation
verbose: flag for detailed output
figsize: the size of correlation connections graph (printed if verbose)
corr_graph_type: type of connectivity to persue in finding groups of correlated features
'connected' - groups are formed from features directly or indirectly connected by singnificant correlation
'complete' - groups are formed from features that are directly connected to each other by significant
correlation (each pair of features from a group will have a significant connection)
Returns
--------
a list of lists representing correlated group
'''
if features is None:
features=[]
if features_to_leave is None:
features_to_leave=[]
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if features == [] or features is None:
candidates = [x for x in sample.features if x not in features_to_leave]
else:
candidates = [x for x in features if x not in features_to_leave]
correlations = sample.dataframe[candidates].corr(method = method)
if verbose:
draw_corr=correlations.copy()
draw_corr.index=[x+' (%i)' % i for i,x in enumerate(draw_corr.index)]
draw_corr.columns=range(len(draw_corr.columns))
display(draw_corr.round(2).style.applymap(color_digits,threshold_red=threshold))
G=nx.Graph()
for i in range(correlations.shape[0]):
for j in range(i+1, correlations.shape[0]):
if correlations.loc[correlations.columns[i], correlations.columns[j]]>threshold:
G.add_nodes_from([correlations.columns[i], correlations.columns[j]])
G.add_edge(correlations.columns[i], correlations.columns[j], label=str(round(correlations.loc[correlations.columns[i], correlations.columns[j]],3)))
if verbose:
plt.figure(figsize=(figsize[0]*1.2, figsize[1]))
pos = nx.spring_layout(G, k=100)
edge_labels = nx.get_edge_attributes(G,'label')
nx.draw(G, pos, with_labels=True)
nx.draw_networkx_edge_labels(G, pos, edge_labels = edge_labels)
plt.margins(x=0.2)
plt.show()
correlated_groups=[]
if corr_graph_type=='connected':
for x in nx.connected_components(G):
correlated_groups.append(sorted(list(x)))
elif corr_graph_type=='complete':
for x in nx.find_cliques(G):
correlated_groups.append(sorted(x))
else:
print('Unknown correlation graph type. Please use "connected" or "complete". Return None.')
return None
return correlated_groups
#---------------------------------------------------------------
class VIF(Processor):
'''
Calculates variance inflation factor for each feature
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, drop_features = False, features=None, features_to_leave=None, threshold = 5,
drop_with_highest_VIF=True, verbose=True, out=None, sep=';'):
'''
Parameters
-----------
data: a Data or DataSamples object to check VIF on (in case of DataSamples it's train sample will be checked)
drop_features: permition to delete excluded features and return a Data object without them
features: a list of features to analyze; by default - all the features
features_to_leave: a list of features that must not be deleted from the feature list
threshold: the lowest value of VIF for feature to be excluded
drop_with_highest_VIF: should the features with the highest VIF be excluded first (otherwise just with the lowest gini)
verbose: flag for detailed output
out: file name for export of VIF values (.csv and .xlsx types are supported)
sep: the separator in case of .csv export
Returns
---------
Data or DataSamples object without excluded features
A pandas DataFrame with VIF values on different iterations
'''
if features_to_leave is None:
features_to_leave=[]
self.stats = pd.DataFrame({'drop_features' : [drop_features], 'threshold' : [threshold], 'out' : [out]})
if type(data)==DataSamples:
sample=data.train
else:
sample=data
if len(sample.ginis)==0:
print('No calculated ginis in datasamples.train/data object. Set calc_gini=True while using WOE.transform or use Data.calc_gini. Return None')
return None
if features is None:
features = sample.features.copy()
features_to_drop = []
to_check_VIF = True
vifs_df=pd.DataFrame(index=features)
iteration=-1
while to_check_VIF:
to_check_VIF = False
iteration=iteration+1
s = sample.target + ' ~ '
for f in features:
s = s + f + '+'
s = s[:-1]
# Break into left and right hand side; y and X
y_, X_ = dmatrices(formula_like=s, data=sample.dataframe, return_type="dataframe")
# For each Xi, calculate VIF
vifs = {features[i-1]:variance_inflation_factor(X_.values, i) for i in range(1, X_.shape[1])}
vifs_df=vifs_df.join(pd.DataFrame(vifs, index=[iteration]).T)
if drop_with_highest_VIF:
with_high_vif={x:sample.ginis[x] for x in vifs if vifs[x]==max({x:vifs[x] for x in vifs if x not in features_to_leave}.values()) and vifs[x]>threshold and x not in features_to_leave}
else:
with_high_vif={x:sample.ginis[x] for x in vifs if vifs[x]>threshold and x not in features_to_leave}
if len(with_high_vif)>0:
feature_to_drop=min(with_high_vif, key=with_high_vif.get)
features_to_drop.append(feature_to_drop)
if verbose:
print('Dropping %(v)s because of high VIF (VIF=%(vi)0.2f, Gini=%(g)0.2f)' % {'v':feature_to_drop, 'vi':vifs[feature_to_drop], 'g':with_high_vif[feature_to_drop]})
features.remove(feature_to_drop)
to_check_VIF=True
result_data = copy.deepcopy(data)
if drop_features:
result_data.features_exclude(features_to_drop, verbose=False)
out_png = 'VIF.png'
if out is not None:
if out[-4:]=='.csv':
vifs_df.round(2).to_csv(out, sep = sep)
out_png = out[:-4] + '.png'
elif out[-5:]=='.xlsx' or out[-4:]=='.xls':
vifs_df.round(2).style.applymap(color_digits, threshold_red=threshold).to_excel(out, engine='openpyxl', sheet_name='Variance Inflation Factor')
out_png = out[:-5] + '.png' if out[-5:]=='.xlsx' else out[:-4] + '.png'
elif out[-4:] == '.png':
out_png = out
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
vif_fig = sns.heatmap(vifs_df.round(2).sort_values(0, ascending = False), xticklabels = False, annot = True,
cmap = 'RdYlBu_r',
cbar = False, vmax = 5, yticklabels = True).figure
vif_fig.set_size_inches(vifs_df.shape[0]/4, vifs_df.shape[0]/2)
vif_fig.savefig(out_png, bbox_inches='tight')
plt.close()
self.stats['out'] = out_png
if verbose:
display(vifs_df.round(2).style.applymap(color_digits, threshold_red=threshold))
print('Dropped (if drop_features=True):', features_to_drop)
return result_data, vifs_df
#---------------------------------------------------------------
class FeatureEncoder(Processor):
'''
For processing non-numeric features
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, data, how_to_code, inplace = False):
'''
Parameters
-----------
data: data to process, Data type
how_to_code: a dictionary {how: features_list} where 'how' can be 'one_hot' or 'seq'(means 'sequential') and 'features_list' is a list of columns in data to process
inplace: whether to change the data or to create a new Data object
Returns
---------
Data with additional features and dictionary for sequantial encoding
'''
result = data.dataframe.copy()
feature_list = data.features.copy()
d = {}
for how in how_to_code:
if how == 'one_hot':
for feature in how_to_code[how]:
one_hot = pd.get_dummies(result[feature])
one_hot.columns = [feature + '_' + str(c) for c in one_hot.columns]
feature_list = feature_list + list(one_hot.columns)
result = result.join(one_hot)
elif how == 'seq':
for feature in how_to_code[how]:
for (i, j) in enumerate(result[feature].drop_duplicates()):
d[j] = i
result[feature + '_code'] = result[feature].apply(lambda x: d[x])
feature_list = feature_list + [feature + '_code']
else:
print ('Do not understand your command. Please use "one_hot" or "seq" for how_to_code. Good luck.')
return None
self.param_dict_to_stats(data, how_to_code)
# for sequential, saves actual encoding
self.stats.loc[self.stats.action == 'seq', 'action'] = str(d)
if inplace:
data = Data(result, features = feature_list, target = data.target, weights = data.weights)
return d
else:
return Data(result, features = feature_list, target = data.target, weights = data.weights), d
#---------------------------------------------------------------
# Author - <NAME>
class GiniChecker(Processor):
'''
Class for gini checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, datasamples, gini_threshold=5, gini_decrease_threshold=0.2, gini_increase_restrict=True, verbose=False, with_test=False,
out=False, out_images='GiniChecker/'):
'''
Checks if gini of the feature is significant and stable enough
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
datasamples: an object of DataSamples type containing the samples to check input feature on
gini_threshold: gini on train and validate/95% bootstrap should be greater then this
gini_decrease_threshold: gini decrease from train to validate/95% bootstrap deviation from mean to mean should be greater then this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean for image output or a path for csv/xlsx output file to export gini values
out_images: a path for image output (default - GiniChecker/)
Returns
----------
Boolean - whether the check was successful
and if isinstance(out,str) then dictionary of gini values for all available samples
'''
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
if verbose:
print('Checking', feature.feature)
gini_correct=True
d=feature.transform(datasamples.train, original_values=True)
fpr, tpr, _ = roc_curve(d.dataframe[d.target], -d.dataframe[feature.feature+'_WOE'])
gini_train= (2*auc(fpr, tpr)-1)*100
if verbose:
print('Train gini = '+str(round(gini_train,2)))
if gini_train<gini_threshold:
gini_correct=False
if verbose:
print('Train gini is less then threshold '+str(gini_threshold))
samples=[datasamples.validate, datasamples.test]
sample_names=['Validate', 'Test']
gini_values={'Train':gini_train}
for si in range(len(samples)):
if samples[si] is not None:
d=feature.transform(samples[si], original_values=True)
fpr, tpr, _ = roc_curve(d.dataframe[d.target], -d.dataframe[feature.feature+'_WOE'])
gini = (2*auc(fpr, tpr)-1)*100
gini_values[samples[si].name]=gini
if verbose:
print(samples[si].name+' gini = '+str(round(gini,2)))
if with_test or samples[si].name!='Test':
if gini<gini_threshold:
gini_correct=False
if verbose:
print(samples[si].name+' gini is less then threshold '+str(gini_threshold))
decrease=1-gini/gini_train
if decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini decrease from Train to '+samples[si].name+' is greater then threshold: '+str(round(decrease,5))+' > '+str(gini_decrease_threshold))
if gini_increase_restrict and -decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini increase from Train to '+samples[si].name+' is greater then threshold: '+str(round(-decrease,5))+' > '+str(gini_decrease_threshold))
else:
gini_values[sample_names[si]]=None
gini_list=[]
if datasamples.bootstrap_base is not None:
db=feature.transform(datasamples.bootstrap_base.keep(feature.feature), original_values=True)
for bn in range(len(datasamples.bootstrap)):
d=db.dataframe.iloc[datasamples.bootstrap[bn]]
fpr, tpr, _ = roc_curve(d[db.target], -d[feature.feature+'_WOE'])
roc_auc = auc(fpr, tpr)
gini_list.append(round((roc_auc*2 - 1)*100, 2))
mean=np.mean(gini_list)
std=np.std(gini_list)
if verbose:
sns.distplot(gini_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,2))+', std = '+str(round(std,2)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.title(feature.feature, fontsize = 16)
if out:
plt.savefig(out_images+feature.feature+".png", dpi=100, bbox_inches='tight')
plt.show()
if mean-1.96*std<gini_threshold:
gini_correct=False
if verbose:
print('Less then 95% of gini distribution is greater then threshold: (mean-1.96*std) '+str(round(mean-1.96*std,5))+' < '+str(gini_threshold))
val_decrease=1.96*std/mean
if val_decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini deviation from mean for 95% of distribution is greater then threshold: (1.96*std/mean) '+str(round(val_decrease,5))+' > '+str(gini_decrease_threshold))
if isinstance(out, str):
gini_values.update({'Bootstrap'+str(i):gini_list[i] for i in range(len(gini_list))})
return gini_correct, gini_values
else:
return gini_correct
#added 13.08.2018 by <NAME>
def work_all(self, woe, features=None, drop_features=False, gini_threshold=5, gini_decrease_threshold=0.2,
gini_increase_restrict=True, verbose=False, with_test=False, out=False, out_images='GiniChecker/', sep=';'):
'''
Checks if gini of all features from WOE object is significant and stable enough
Parameters
-----------
woe: an object of WOE type that should be checked
drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
gini_threshold: gini on train and validate/95% bootstrap should be greater then this
gini_decrease_threshold: gini decrease from train to validate/95% bootstrap deviation from mean to mean should be greater then this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean for image output or a path for csv/xlsx output file to export gini values
out_images: a path for image output (default - GiniChecker/)
sep: the separator to be used in case of csv export
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in WOE.feature_woes. Abort.')
return None
if out:
directory = os.path.dirname(out_images)
if not os.path.exists(directory):
os.makedirs(directory)
gini_correct={}
if isinstance(out, str):
gini_df=pd.DataFrame(columns=['Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))])
for feature in cycle_features:
if isinstance(out, str):
gini_correct[feature], gini_values=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, gini_threshold=gini_threshold,
gini_decrease_threshold=gini_decrease_threshold,
gini_increase_restrict=gini_increase_restrict, verbose=verbose, with_test=with_test,
out=out, out_images=out_images)
#print(feature, gini_values)
gini_df=gini_df.append(pd.DataFrame(gini_values, index=[feature]))
else:
gini_correct[feature]=self.work(woe.feature_woes[feature], datasamples=woe.datasamples, gini_threshold=gini_threshold,
gini_decrease_threshold=gini_decrease_threshold,
gini_increase_restrict=gini_increase_restrict, verbose=verbose, with_test=with_test,
out=out, out_images=out_images)
if isinstance(out, str):
gini_df=gini_df[['Train', 'Validate', 'Test']+['Bootstrap'+str(i) for i in range(len(woe.datasamples.bootstrap))]].dropna(axis=1)
if out[-4:]=='.csv':
gini_df.to_csv(out, sep = sep)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
gini_df.style.apply(color_background,
mn=gini_df.min().min(), mx=gini_df.max().max(), cmap='RdYlGn').to_excel(writer, sheet_name='Gini by Samples')
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Gini by Samples']
for x in worksheet.columns:
worksheet.column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 12
writer.save()
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if gini_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if gini_correct[x]}
return gini_correct
def work_tree(self, dtree, input_df=None, gini_threshold=5, gini_decrease_threshold=0.2, gini_increase_restrict=True,
verbose=False, with_test=False, out=False):
'''
Checks if gini of the tree is significant and stable enough
Parameters
-----------
dtree: a cross.DecisionTree object
input_df: a DataFrame, containing tree description
datasamples: an object of DataSamples type containing the samples to check input tree on
gini_threshold: gini on train and validate/95% bootstrap should be greater then this
gini_decrease_threshold: gini decrease from train to validate/95% bootstrap deviation from mean to mean should be greater then this
gini_increase_restrict: if gini increase should also be restricted
verbose: if comments and graphs should be printed
with_test: add checking of gini values on test (calculation is always on)
out: a boolean flag for gini values output
Returns
----------
Boolean - whether the check was successful
and if out==True then dictionary of gini values for all available samples
'''
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
datasamples=dtree.datasamples
features=[x for x in dtree.features if x in tree_df]
#[x for x in tree_df.columns[:tree_df.columns.get_loc('node')] if tree_df[x].dropna().shape[0]>0]
if verbose:
print('Checking tree on', str(features))
gini_correct=True
samples=[datasamples.train, datasamples.validate, datasamples.test]
sample_names=['Train', 'Validate', 'Test']
gini_values={}
for si in range(len(samples)):
if samples[si] is not None:
to_check=samples[si].keep(features=features).dataframe
to_check['woe']=dtree.transform(to_check, tree_df, ret_values=['woe'])
fpr, tpr, _ = roc_curve(to_check[samples[si].target], -to_check['woe'])
gini = (2*auc(fpr, tpr)-1)*100
gini_values[samples[si].name]=gini
if verbose:
print(samples[si].name+' gini = '+str(round(gini,2)))
if with_test or samples[si].name!='Test':
if gini<gini_threshold:
gini_correct=False
if verbose:
print(samples[si].name+' gini is less then threshold '+str(gini_threshold))
if samples[si].name!='Train':
decrease=1-gini/gini_values['Train']
if decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini decrease from Train to '+samples[si].name+' is greater then threshold: '+str(round(decrease,5))+' > '+str(gini_decrease_threshold))
if gini_increase_restrict and -decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini increase from Train to '+samples[si].name+' is greater then threshold: '+str(round(-decrease,5))+' > '+str(gini_decrease_threshold))
else:
gini_values[sample_names[si]]=None
gini_list=[]
if datasamples.bootstrap_base is not None:
base_with_woe=datasamples.bootstrap_base.keep(features=features).dataframe
base_with_woe['woe']=dtree.transform(base_with_woe, tree_df, ret_values=['woe'])
for bn in range(len(datasamples.bootstrap)):
to_check=base_with_woe.iloc[datasamples.bootstrap[bn]]
fpr, tpr, _ = roc_curve(to_check[datasamples.bootstrap_base.target], -to_check['woe'])
roc_auc = auc(fpr, tpr)
gini_list.append(round((roc_auc*2 - 1)*100, 2))
mean=np.mean(gini_list)
std=np.std(gini_list)
if verbose>True:
sns.distplot(gini_list)
plt.axvline(x=mean, linestyle='--', alpha=0.5)
plt.text(mean, 0, ' Mean = '+str(round(mean,2))+', std = '+str(round(std,2)),
horizontalalignment='right', verticalalignment='bottom', rotation=90)
plt.xlabel('Gini values in bootstrap')
plt.ylabel('Distribution')
plt.title('Tree on '+str(features), fontsize = 16)
plt.show()
elif verbose:
print('Bootstrap: mean = '+str(round(mean,2))+', std = '+str(round(std,2)))
if mean-1.96*std<gini_threshold:
gini_correct=False
if verbose:
print('Less then 95% of gini distribution is greater then threshold: (mean-1.96*std) '+str(round(mean-1.96*std,5))+' < '+str(gini_threshold))
val_decrease=1.96*std/mean
if val_decrease>gini_decrease_threshold:
gini_correct=False
if verbose:
print('Gini deviation from mean for 95% of distribution is greater then threshold: (1.96*std/mean) '+str(round(val_decrease,5))+' > '+str(gini_decrease_threshold))
if out:
gini_values.update({'Bootstrap'+str(i):gini_list[i] for i in range(len(gini_list))})
return gini_correct, gini_values
else:
return gini_correct
#---------------------------------------------------------------
# Author - <NAME>
class BusinessLogicChecker(Processor):
'''
Class for business logic checking
'''
def __init__(self):
self.stats = pd.DataFrame()
def work(self, feature, conditions='', verbose=False, out=None):
'''
Checks if the business logic condition is True
Parameters
-----------
feature: an object of FeatureWOE type that should be checked
conditions: a string with business logic conditions
for feature.categorical==True: 'cond_1;cond_2;...;cond_n', where cond_i
is 'A sign B', where A and B
are comma-separated lists of values (or nothing, but not both at the same time)
and where sign
is one of the following: <, >, =, <=, >=
each condition compares risk of bins with values from A to risk of bins with values from B (if B is omitted,
then risk of bins with values from A is compared to risk of bins with values not in A);
> means that risk of the second values group is smaller then the risk of the first values group (and values from
different groups cannot be in one bin), < means the opposite (again, values from different groups cannot be in one
bin), adding = allows values from different groups to be in one bin;
ALL of the conditions should be True or conditions should be empty for the input feature to pass the check
-----------------------------------------------------------------------------------------------------------
for feature.categorical==False:'cond_1;cond_2;....;cond_n (excl_1;...;excl_n)', where cond_i
is 'sign_1 value_2 sign_2 value_3 sign_3 ... value_n sign_n', where sign_i
is one of the following: <, >
and where value_i
is a float/int and can be omitted
and where excl_i
is a float/int and can be omitted (if there is not excl_i at all, then parentheses can be omitted too)
each condition describes how should risk be changing when feature values are increasing;
> means that risk will be monotonicaly decreasing with increase of values, < means the opposite, >< means that
risk decrease and then increase, adding value between signs tells, that in the bin with this value should be
the local risk extremum (>N< means that the bin with N in it should have the least risk);
adding values in () will result in exclusion of bins with these values before risk trend checking (and so bins
with these values are ignored);
each condition should start with a sign and end with a sign, one sign is permitter, values between signs
can be omitted;
ANY one of the conditions should be True for the input feature to pass the check
in case of conditions==None or conditions=='' checker wil return True is risk trend is monotonicaly
increasing/decresing (the same check will be processed if only values to exclude are provided)
verbose: if comments and graphs should be printed
out: a path for csv/xlsx output file to export business logic check results
Returns
----------
Boolean - whether the check was successful
and if out is not None then dataframe of check log
'''
if out is not None:
out_df=pd.DataFrame(columns=['feature', 'categorical', 'condition', 'fact', 'condition_result'])
if feature.categorical == False:
woes_dropna={feature.groups[x][0]:feature.woes[x] for x in feature.woes if isinstance(feature.groups[x],list)}
groups_info=pd.DataFrame(woes_dropna, index=['woe']).transpose().reset_index().rename({'index':'lower'}, axis=1)
groups_info['upper']=groups_info['lower'].shift(-1).fillna(np.inf)
if groups_info.shape[0]==1:
if verbose:
print('Only 1 group with non-missing values is present. Skipping trend check..')
all_cond_correct=True
else:
all_cond_correct=False
for c in conditions.split(';'):
#find all floats/ints between > and < - minimal risk
#first there should be >, then + or - or nothing, then at least one digit, then . or , or nothing, then zero or more digits and < after that
min_risk = re.findall('(?<=>)[-+]?\d+[.,]?\d*(?=<)', c)
#find all floats/ints between < and > - maximal risk
max_risk = re.findall('(?<=<)[-+]?\d+[.,]?\d*(?=>)', c)
#find all floats/ints between ( and ), ( and ; or ; and ) - values to exclude (without risk checking)
excl_risk = re.findall('(?<=[(;])[-+]?\d+[.,]?\d*(?=[;)])', c)
clear_condition=''.join(x for x in c if x in '<>')
gi_check=groups_info.dropna(how='all', subset=['lower','upper'])[['woe','lower','upper']].copy()
for excl in excl_risk:
gi_check=gi_check[((gi_check['lower']<=float(excl)) & (gi_check['upper']>float(excl)))==False]
gi_check['risk_trend']=np.sign((gi_check['woe']-gi_check['woe'].shift(1)).dropna()).apply(lambda x: '+' if (x<0) else '-' if (x>0) else '0')
trend=gi_check['risk_trend'].str.cat()
reg_exp=r''
for s in clear_condition:
if s=='>':
reg_exp=reg_exp+r'-+'
if s=='<':
reg_exp=reg_exp+r'\++'
if len(reg_exp)==0:
reg_exp='-*|\+*'
if re.fullmatch(reg_exp, trend):
trend_correct=True
if verbose:
print('Risk trend in data is consistent with input trend: input ', clear_condition, ', data ', trend)
else:
trend_correct=False
if verbose:
print('Risk trend in data is not consistent with input trend: input ', clear_condition, ', data ', trend)
#local risk minimums
min_risk_data=gi_check[(gi_check['risk_trend']=='-') & (gi_check['risk_trend'].shift(-1)=='+')].reset_index(drop=True)
min_risk_correct=True
for mr in range(len(min_risk)):
if mr+1<=min_risk_data.shape[0]:
if verbose:
print(feature.feature+': checking min risk in', min_risk[mr], '(between ', min_risk_data['lower'].loc[mr], ' and ', min_risk_data['upper'].loc[mr], ')')
min_risk_correct=min_risk_correct and (float(min_risk[mr])>=min_risk_data['lower'].loc[mr] and float(min_risk[mr])<min_risk_data['upper'].loc[mr])
else:
if verbose:
print(feature.feature+': not enough minimums in data to check', min_risk[mr])
min_risk_correct=False
#local risk maximums
max_risk_data=gi_check[(gi_check['risk_trend']=='+') & (gi_check['risk_trend'].shift(-1)=='-')].reset_index(drop=True)
max_risk_correct=True
for mr in range(len(max_risk)):
if mr+1<=max_risk_data.shape[0]:
if verbose:
print(feature.feature+': checking max risk in', max_risk[mr], '(between ', max_risk_data['lower'].loc[mr], ' and ', max_risk_data['upper'].loc[mr], ')')
max_risk_correct=max_risk_correct and (float(max_risk[mr])>=max_risk_data['lower'].loc[mr] and float(max_risk[mr])<max_risk_data['upper'].loc[mr])
else:
if verbose:
print(feature.feature+': not enough maximums in data to check', max_risk[mr])
min_risk_correct=False
all_cond_correct=all_cond_correct or (trend_correct and min_risk_correct and max_risk_correct)
if out is not None:
out_df=out_df.append(dict(feature=feature.feature, categorical=feature.categorical, condition=c, fact=trend, condition_result=trend_correct and min_risk_correct and max_risk_correct), ignore_index=True)
if verbose:
if all_cond_correct:
print(feature.feature+': business logic check succeeded.')
else:
fig=plt.figure(figsize=(5,0.5))
plt.plot(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
groups_info.dropna(how='all', subset=['lower','upper'])['woe'], color='red')
plt.xticks(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
round(groups_info.dropna(how='all', subset=['lower','upper'])['lower'],3))
plt.ylabel('WoE')
fig.autofmt_xdate()
plt.show()
print(feature.feature+': business logic check failed.')
if out is not None:
return all_cond_correct, out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result']]
else:
return all_cond_correct
else:
all_cond_correct=True
if conditions!='':
w={}
for x in feature.groups:
for y in feature.groups[x]:
w[y]=feature.woes[x]
groups_info=pd.DataFrame(w, index=['woe']).transpose().reset_index().rename({'index':'categories'}, axis=1)
groups_info=groups_info[groups_info['categories']!=-np.inf].reset_index(drop=True).copy()
cond_types2=['>=','=>','<=','=<']
cond_types1=['>','<','=']
for c in conditions.split(';'):
c0=[]
c1=[]
cond_type=[x for x in cond_types2 if x in c]
if len(cond_type)==0:
cond_type=[x for x in cond_types1 if x in c]
cond_type=cond_type[0]
if cond_type in ['>=', '=>', '>']:
c0=ast.literal_eval('['+c[:c.find(cond_type)]+']')
c1=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
elif cond_type in ['<=', '=<', '<']:
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
elif cond_type=='=':
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
can_be_equal=('=' in cond_type)
groups_info['risk_group']=groups_info['categories'].apply(lambda x: 0 if (x in c0 or (len(c0)==0 and x not in c1)) else 1 if (x in c1 or (len(c1)==0 and x not in c0)) else np.nan)
cond_correct = (cond_type!='=' and groups_info[groups_info['risk_group']==0]['woe'].max()<groups_info[groups_info['risk_group']==1]['woe'].min()) or (can_be_equal and (groups_info[groups_info['risk_group']==0]['woe'].max()==groups_info[groups_info['risk_group']==1]['woe'].min() or c0==c1))
all_cond_correct=all_cond_correct and cond_correct
if verbose:
print(feature.feature+': checking condition '+ c + ' => ' + str(cond_correct))
if out is not None:
out_df=out_df.append(dict(feature=feature.feature, categorical=feature.categorical, condition=c, fact='', condition_result=cond_correct), ignore_index=True)
if verbose:
print(feature.feature+': conditions ' + conditions + ' => ' + str(all_cond_correct))
else:
if verbose:
print(feature.feature+': no conditions were specified, business logic check succeeded.')
if out is not None:
return all_cond_correct, out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result']]
else:
return all_cond_correct
#added 13.08.2018 by <NAME>
def work_all(self, woe, features=None, input_conditions=None, drop_features=False, verbose=False, out=None, sep=';'):
'''
Checks if business logic conditions for all features from the WOE object are True
Parameters
-----------
woe: an object of FeatureWOE type that should be checked
input_conditions: adress for excel-file with business logic conditions (columns 'variable' and 'condition' are mandatory)
drop_features: should the features be dropped from WOE.feature_woes list in case of failed checks
verbose: if comments and graphs should be printed
out: a path for csv/xlsx output file to export business logic check results
sep: the separator to be used in case of csv export
Returns
----------
Dictionary with results of check for all features from input WOE object
'''
if out is not None:
out_df=pd.DataFrame(columns=['feature', 'categorical', 'condition', 'fact', 'condition_result', 'overall_result'])
if features is None:
cycle_features=list(woe.feature_woes)
else:
cycle_features=list(features)
not_in_features_woe=[x for x in cycle_features if x not in woe.feature_woes]
if len(not_in_features_woe)>0:
print('No', not_in_features_woe, 'in self.feature_woes. Abort.')
return None
business_logic_correct={}
'''
if conditions_dict is not None:
if isinstance(conditions_dict, dict):
conditions_dict=pd.DataFrame(conditions_dict, index=['conditions']).T
elif isinstance(conditions_dict, str) and (conditions_dict[-5:]=='.xlsx' or conditions_dict[-4:]=='.xls'):
try:
conditions=pd.read_excel(conditions_dict).set_index('variable')
conditions['conditions']=conditions['conditions'].apply(lambda x: '' if (pd.isnull(x)) else x)
except Exception:
print('No conditions dictionary was found / no "variable" or "conditions" fields were found. Abort.')
return None
elif isinstance(conditions_dict, str):
conditions_dict=pd.DataFrame({x:conditions_dict for x in cycle_features},
index=['conditions']).T
else:
conditions=pd.DataFrame()
'''
if input_conditions is None:
conditions_dict=pd.DataFrame(columns=['feature', 'conditions'])
elif isinstance(input_conditions, dict) or isinstance(input_conditions, pd.DataFrame):
conditions_dict=input_conditions.copy()
elif isinstance(input_conditions, str):
if input_conditions[-4:]=='.csv':
conditions_dict=pd.read_csv(input_conditions, sep = sep)
elif input_conditions[-4:]=='.xls' or input_conditions[-5:]=='.xlsx':
conditions_dict=pd.read_excel(input_conditions)
else:
print('Unknown format for path to conditions dictionary file. Return None.')
elif isinstance(input_conditions, tuple):
conditions_dict={x:input_conditions[0] if x not in woe.categorical else input_conditions[1] for x in cycle_features}
else:
print('Unknown format for conditions dictionary file. Return None')
return None
if isinstance(conditions_dict, pd.DataFrame):
for v in ['feature', 'variable', 'var']:
if v in conditions_dict:
break
try:
conditions_dict=dict(conditions_dict.fillna('').set_index(v)['conditions'])
except Exception:
print("No 'feature' ,'variable', 'var' or 'conditions' field in input pandas.DataFrame. Return None.")
return None
for feature in cycle_features:
if feature not in conditions_dict:
current_conditions=''
else:
current_conditions=conditions_dict[feature]
if out is not None:
business_logic_correct[feature], out_feature_df=self.work(woe.feature_woes[feature], conditions=current_conditions, verbose=verbose, out=out)
out_feature_df['overall_result']=business_logic_correct[feature]
out_df=out_df.append(out_feature_df, ignore_index=True)
else:
business_logic_correct[feature]=self.work(woe.feature_woes[feature], conditions=current_conditions, verbose=verbose, out=out)
if drop_features:
woe.excluded_feature_woes.update({x:woe.feature_woes[x] for x in woe.feature_woes if business_logic_correct[x]==False})
woe.feature_woes={x:woe.feature_woes[x] for x in woe.feature_woes if business_logic_correct[x]}
if out is not None:
out_df=out_df[['feature', 'categorical', 'condition', 'fact', 'condition_result', 'overall_result']]
#display(out_df)
if out[-4:]=='.csv':
out_df.to_csv(out, sep = sep)
elif out[-4:]=='.xls' or out[-5:]=='.xlsx':
writer = pd.ExcelWriter(out, engine='openpyxl')
out_df.style.apply(self.color_result, subset=pd.IndexSlice[:,['condition_result', 'overall_result']]).to_excel(writer, sheet_name='Business Logic', index=False)
# Get the openpyxl objects from the dataframe writer object.
worksheet = writer.sheets['Business Logic']
for x in worksheet.columns:
worksheet.column_dimensions[x[0].column].width = 40 if x[0].column=='A' else 20
writer.save()
else:
print('Unknown format for export file. Use .csv or .xlsx. Skipping export.')
return business_logic_correct
def work_tree(self, dtree, input_df=None, input_conditions=None, max_corrections=None, sep=';', to_correct=False, verbose=False):
'''
Checks if the business logic conditions are True in every node of the input tree and corrects the tree for it to pass the check
Parameters
-----------
dtree: a cross.DecisionTree object to check
input_df: a DataFrame, containing tree description
input_conditions: a DataFrame, a dictionary or a string with a path to conditions dictionary (in case of DataFrame or string
the field with features' names should be called 'feature', 'variable' or 'var')
for categorical features: 'cond_1;cond_2;...;cond_n', where cond_i
is 'A sign B', where A and B
are comma-separated lists of values (or nothing, but not both at the same time)
and where sign
is one of the following: <, >, =, <=, >=
each condition compares risk of bins with values from A to risk of bins with values from B (if B is omitted,
then risk of bins with values from A is compared to risk of bins with values not in A);
> means that risk of the second values group is smaller then the risk of the first values group (and values from
different groups cannot be in one bin), < means the opposite (again, values from different groups cannot be in one
bin), adding = allows values from different groups to be in one bin;
ALL of the conditions should be True or conditions should be empty for the input feature to pass the check
-----------------------------------------------------------------------------------------------------------
for interval features:'cond_1;cond_2;....;cond_n (excl_1;...;excl_n)', where cond_i
is 'sign_1 sign_2 sign_3 ... sign_n', where sign_i
is one of the following: <, >
and where excl_i
is a float/int and can be omitted (if there is not excl_i at all, then parentheses can be omitted too)
each condition describes how should risk be changing when feature values are increasing;
> means that risk will be monotonicaly decreasing with increase of values, < means the opposite, >< means that
risk decrease and then increase, values between signs will be ignored because for most of nodes entire sample won't be
available for division and extremum values' absense or the presence of new local extremums should not be prohibited;
adding values in () will result in exclusion of bins with these values before risk trend checking (and so bins
with these values are ignored);
each condition should start with a sign and end with a sign, one sign is permitted;
ANY one of the conditions should be True for the input feature to pass the check
in case of conditions==None or conditions=='' checker wil return True is risk trend is monotonicaly
increasing/decresing (the same check will be processed if only values to exclude are provided)
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
sep: a separator in case of csv import for conditions dictionary
to_correct: should there be attempts to correct tree by uniting nodes or not
verbose: if comments and graphs should be printed
Returns
----------
if to_correct:
True and a DataFrame with tree description - corrected or initial
else:
result of the input tree check and the input tree itself
'''
#-----------------------------------------------Subsidiary functions--------------------------------------------------
def bl_check_categorical(df, conditions, verbose=False, missing_group_is_correct=True):
'''
TECH
Check correctness of conditions for a categorical feature
Parameters
-----------
df: a DataFrame, containing lists of categories and WoE values
conditions: a string, containing business logic conditions for a feature
verbose: if comments should be printed
missing_group_is_correct: should missing of any value from condition in input data be considered as
successful check or not
Returns
----------
boolean flag of successful check
'''
all_cond_correct=True
if conditions!='':
tree_df=df.copy()
#display(tree_df)
cat_woes=[]
for i in tree_df.index:
categories, n, w = tree_df.loc[i]
#display(tree_df.loc[i])
#display(categories)
for c in categories:
cat_woes.append([c, n, w])
groups_info=pd.DataFrame(cat_woes, columns=['categories', 'nodes', 'woe'])
#display(groups_info)
cond_types2=['>=','=>','<=','=<']
cond_types1=['>','<','=']
for c in conditions.split(';'):
c0=[]
c1=[]
cond_type=[x for x in cond_types2 if x in c]
if len(cond_type)==0:
cond_type=[x for x in cond_types1 if x in c]
cond_type=cond_type[0]
if cond_type in ['>=', '=>', '>']:
c0=ast.literal_eval('['+c[:c.find(cond_type)]+']')
c1=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
elif cond_type in ['<=', '=<', '<']:
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
elif cond_type=='=':
c0=ast.literal_eval('['+c[c.find(cond_type)+len(cond_type):]+']')
c1=ast.literal_eval('['+c[:c.find(cond_type)]+']')
can_be_equal=('=' in cond_type)
groups_info['risk_group']=groups_info['categories'].apply(lambda x: 0 if (x in c0 or (len(c0)==0 and x not in c1)) else 1 if (x in c1 or (len(c1)==0 and x not in c0)) else np.nan)
cond_correct = (cond_type!='=' and groups_info[groups_info['risk_group']==0]['woe'].max()<groups_info[groups_info['risk_group']==1]['woe'].min()) or \
(can_be_equal and (groups_info[groups_info['risk_group']==0]['woe'].max()==groups_info[groups_info['risk_group']==1]['woe'].min() or c0==c1)) or \
(missing_group_is_correct and len(groups_info['risk_group'].dropna().unique())<2)
all_cond_correct=all_cond_correct and cond_correct
if verbose:
print('\tChecking condition '+ c + ' => ' + str(cond_correct))
if verbose:
print('\tConditions ' + conditions + ' => ' + str(all_cond_correct))
elif verbose:
print('\tNo conditions were specified, business logic check succeeded.')
return all_cond_correct
def bl_check_interval(df, conditions, verbose=False):
'''
TECH
Check correctness of conditions for an interval feature
Parameters
-----------
df: a DataFrame, containing intervals' descriptions and WoE values
conditions: a string, containing business logic conditions for a feature
verbose: if comments should be printed
Returns
----------
boolean flag of successful check
'''
tree_df=df.copy()
split_feature=tree_df.columns[0]
groups_info=tree_df[pd.isnull(tree_df[split_feature])==False]
groups_info['upper']=groups_info[split_feature].apply(lambda x: x[0][1] if pd.isnull(x[1]) else x[1])
groups_info['lower']=groups_info[split_feature].apply(lambda x: x[0][0] if pd.isnull(x[1]) else x[0])
#display(groups_info)
if groups_info.shape[0]==1:
if verbose:
print('\tOnly 1 group with non-missing values is present. Skipping trend check..')
all_cond_correct=True
else:
all_cond_correct=False
for c in conditions.split(';'):
#find all floats/ints between > and < - minimal risk
#first there should be >, then + or - or nothing, then at least one digit, then . or , or nothing, then zero or more digits and < after that
#min_risk = re.findall('(?<=>)[-+]?\d+[.,]?\d*(?=<)', c)
#find all floats/ints between < and > - maximal risk
#max_risk = re.findall('(?<=<)[-+]?\d+[.,]?\d*(?=>)', c)
#find all floats/ints between ( and ), ( and ; or ; and ) - values to exclude (without risk checking)
excl_risk = re.findall('(?<=[(;])[-+]?\d+[.,]?\d*(?=[;)])', c)
clear_condition=''.join(x for x in c if x in '<>')
gi_check=groups_info.dropna(how='all', subset=['lower','upper'])[['woe','lower','upper']].copy()
for excl in excl_risk:
gi_check=gi_check[((gi_check['lower']<=float(excl)) & (gi_check['upper']>float(excl)))==False]
gi_check['risk_trend']=np.sign((gi_check['woe']-gi_check['woe'].shift(1)).dropna()).apply(lambda x: '+' if (x<0) else '-' if (x>0) else '0')
trend=gi_check['risk_trend'].str.cat()
reg_exp=r''
for s in clear_condition:
if s=='>':
reg_exp=reg_exp+r'-+'
if s=='<':
reg_exp=reg_exp+r'\++'
if len(reg_exp)==0:
reg_exp='-*|\+*'
if re.fullmatch(reg_exp, trend):
trend_correct=True
if verbose:
print('\tRisk trend in data is consistent with input trend: input ', clear_condition, ', data ', trend)
else:
trend_correct=False
if verbose:
print('\tRisk trend in data is not consistent with input trend: input ', clear_condition, ', data ', trend)
'''#local risk minimums
min_risk_data=gi_check[(gi_check['risk_trend']=='-') & (gi_check['risk_trend'].shift(-1)=='+')].reset_index(drop=True)
min_risk_correct=True
for mr in range(len(min_risk)):
if mr+1<=min_risk_data.shape[0]:
if verbose:
print('\tChecking min risk in', min_risk[mr], '(between ', min_risk_data['lower'].loc[mr], ' and ', min_risk_data['upper'].loc[mr], ')')
min_risk_correct=min_risk_correct and (float(min_risk[mr])>=min_risk_data['lower'].loc[mr] and float(min_risk[mr])<min_risk_data['upper'].loc[mr])
else:
if verbose:
print('\tNot enough minimums in data to check', min_risk[mr])
min_risk_correct=False
#local risk maximums
max_risk_data=gi_check[(gi_check['risk_trend']=='+') & (gi_check['risk_trend'].shift(-1)=='-')].reset_index(drop=True)
max_risk_correct=True
for mr in range(len(max_risk)):
if mr+1<=max_risk_data.shape[0]:
if verbose:
print('\tChecking max risk in', max_risk[mr], '(between ', max_risk_data['lower'].loc[mr], ' and ', max_risk_data['upper'].loc[mr], ')')
max_risk_correct=max_risk_correct and (float(max_risk[mr])>=max_risk_data['lower'].loc[mr] and float(max_risk[mr])<max_risk_data['upper'].loc[mr])
else:
if verbose:
print('\tNot enough maximums in data to check', max_risk[mr])
min_risk_correct=False
all_cond_correct=all_cond_correct or (trend_correct and min_risk_correct and max_risk_correct)'''
all_cond_correct=all_cond_correct or trend_correct
if verbose:
if all_cond_correct:
print('\tBusiness logic check succeeded.')
else:
fig=plt.figure(figsize=(5,0.5))
plt.plot(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
groups_info.dropna(how='all', subset=['lower','upper'])['woe'], color='red')
plt.xticks(range(len(groups_info.dropna(how='all', subset=['lower','upper'])['lower'])),
round(groups_info.dropna(how='all', subset=['lower','upper'])['lower'],3))
plt.ylabel('WoE')
fig.autofmt_xdate()
plt.show()
print('\tBusiness logic check failed.')
return all_cond_correct
def bl_recursive_correct(tree_df, node, allowed_corrections=1, corrections=None, conditions='', max_corrections=1,
verbose=False):
'''
TECH
Recursive search of corrections needed for tree to pass business logic checks
Parameters
-----------
tree_df: a DataFrame, containing tree description
node: a node number, whose children are corrected and checked
allowed_corrections: a number of remaining corrections, that are allowed
max_corrections: maximal number of corrections in attempt to change the tree so it will pass the check
corrections: the list of current corrections
conditions: a string, containing business logic conditions for a feature, by which current node was split
verbose: if comments and graphs should be printed
Returns
----------
boolean flag of corrected tree passing the check and
the list of corrections, that were made
'''
if corrections is None:
corrections=[]
split_feature=tree_df[(tree_df['node']==node)]['split_feature'].values[0]
if allowed_corrections>0:
possible_nodes_to_correct=sorted(tree_df[(tree_df['parent_node']==node)]['node'].tolist())
combinations=[]
for n1 in range(len(possible_nodes_to_correct)):
for n2 in range(len(possible_nodes_to_correct[n1+1:])):
if dtree.check_unitability(tree_df, [possible_nodes_to_correct[n1], possible_nodes_to_correct[n1+1:][n2]]):
first_condition=tree_df[(tree_df['node']==possible_nodes_to_correct[n1])][split_feature].values[0]
if not(isinstance(first_condition, list) or isinstance(first_condition, tuple)):
nodes_combination=[possible_nodes_to_correct[n1+1:][n2], possible_nodes_to_correct[n1]]
else:
nodes_combination=[possible_nodes_to_correct[n1], possible_nodes_to_correct[n1+1:][n2]]
combinations.append([nodes_combination,
abs(tree_df[tree_df['node']==possible_nodes_to_correct[n1]]['woe'].values[0]- \
tree_df[tree_df['node']==possible_nodes_to_correct[n1+1:][n2]]['woe'].values[0])])
combinations.sort(key=itemgetter(1))
for nodes_to_unite, woe in combinations:
if verbose:
print('Checking (',(max_corrections-allowed_corrections+1),'): for node', node, 'uniting children', str(nodes_to_unite), 'with woe difference =', woe)
tree_df_corrected=dtree.unite_nodes(tree_df, nodes_to_unite)
#display(tree_df_corrected)
if tree_df_corrected.shape[0]!=tree_df.shape[0]:
correct, final_corrections=bl_recursive_correct(tree_df_corrected, node, allowed_corrections-1, corrections+[nodes_to_unite],
conditions, max_corrections=max_corrections, verbose=verbose)
else:
correct=False
if correct:
return correct, final_corrections
else:
return False, corrections
else:
df_to_check=tree_df[(tree_df['parent_node']==node)][[split_feature, 'node', 'woe']]
categorical=sum([isinstance(x, list) for x in df_to_check[split_feature]])>0
if verbose:
print('Node', node, split_feature, (': Checking categorical business logic..' if categorical \
else ': Checking interval business logic..'))
correct=bl_check_categorical(df_to_check, conditions, verbose=verbose) if categorical \
else bl_check_interval(df_to_check, conditions, verbose=verbose)
return correct, corrections
#---------------------------------------------------------------------------------------------------------------------
if input_df is None:
tree_df=dtree.tree.copy()
else:
tree_df=input_df.copy()
features=[x for x in dtree.features if x in tree_df]
if input_conditions is None:
conditions_dict=pd.DataFrame(columns=['feature', 'conditions'])
elif isinstance(input_conditions, dict) or isinstance(input_conditions, pd.DataFrame):
conditions_dict=input_conditions.copy()
elif isinstance(input_conditions, str):
if input_conditions[-4:]=='.csv':
conditions_dict=pd.read_csv(input_conditions, sep = sep)
elif input_conditions[-4:]=='.xls' or input_conditions[-5:]=='.xlsx':
conditions_dict= | pd.read_excel(input_conditions) | pandas.read_excel |
import streamlit as st
import random
import psycopg2
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from sqlalchemy.types import Integer
from streamlit.report_thread import get_report_ctx
#from streamlit.report_thread import add_script_run_ctx
import pydeck as pdk
from datasets import load_dataset
import pymongo
from pymongo import MongoClient
import codecs
import pickle
import json
import requests
import math
import os
import re
import certifi
# Load NTDB-GPT-2 model through huggingface API
API_URL = "https://api-inference.huggingface.co/models/dracoglacius/NTDB-GPT2"
headers = {"Authorization": "Bearer <KEY>"}
eval_file = open("clinical_eval.json", 'r')
data = json.loads(eval_file.read())
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
# Get validation sequences path
#valid_seqs_name = "valid_noestart.txt"
#valid_seqs_path = os.path.join(os.getcwd(), valid_seqs_name)
# Load ICD9 dictionaries for sequence translations
dcode_dict_name = "dcode_dict.txt"
pcode_dict_name = "pcode_dict.txt"
dcode_dict_path = os.path.join(os.getcwd(), dcode_dict_name)
pcode_dict_path = os.path.join(os.getcwd(), pcode_dict_name)
with open(dcode_dict_path, "rb") as fp:
icd9_dcode_dict = pickle.load(fp)
with open(pcode_dict_path, "rb") as fp:
icd9_pcode_dict = pickle.load(fp)
# Load validation dataset to sample/generate from
#start_idx = 0
#end_idx = 10
#datasets = load_dataset("text", data_files={"validation": valid_seqs_path})
#datasetseq = list(datasets['validation'][:].values())[0]
#datasetseq = datasetseq[:1000]
#dataset_len = len(datasetseq)
if 'data' not in st.session_state:
st.session_state.data = [val for val in data]
random.shuffle(st.session_state.data)
st.session_state.labels = [val['label'] for val in st.session_state.data] # will be sent to database
st.session_state.seqs = [val['seq'] for val in st.session_state.data]
#st.session_state.rated_seqs = []
#cleaned_datasetseq = [seq.split() for seq in datasetseq]
# MongoDB connection for sending userfeedback to
client = pymongo.MongoClient("mongodb+srv://stemmler:<EMAIL>/StemmlerProject?retryWrites=true&w=majority&tlsAllowInvalidCertificates=true", tlsCAFile=certifi.where())
#db = client.test
# Get the database URL for heroku postgres
DATABASE_URL = os.environ['DATABASE_URL'] # comment if testing locally
# Get a unique session ID that can used at postgres primary key
def get_session_id() -> str:
session_id = get_report_ctx().session_id
session_id = session_id.replace('-','_')
session_id = '_id_' + session_id # postgres name convention
return session_id
# Functions to read/write states of user input and dataframes
def write_state(column, value, engine, session_id):
engine.execute("UPDATE %s SET %s='%s'" % (session_id, column, value))
def write_state_df(df:pd.DataFrame, engine, session_id):
df.to_sql('%s' % (session_id),engine,index=False,if_exists='replace',chunksize=1000, dtype={str(session_id): Integer()})
def read_state(column, engine, session_id):
state_var = engine.execute("SELECT %s FROM %s" % (column, session_id))
state_var = state_var.first()[0]
return state_var
def read_state_df(engine, session_id):
try:
df = pd.read_sql_table(session_id, con=engine)
except:
df = pd.DataFrame([])
return df
# Retrieve session ID
session_id = get_session_id()
if 'mongo_data' not in st.session_state:
st.session_state.mongo_data = {session_id:[]}
# Translation functions
def translate_dpcode_seq(list_seq, pstart_idx, dcode_dict, pcode_dict):
translation = []
for dcode in list_seq[:pstart_idx]:
dcode = re.sub('[.]', '', dcode)
if dcode in dcode_dict:
translation.append(dcode_dict[dcode])
for pcode in list_seq[pstart_idx+1:]:
pcode = re.sub('[.]', '', pcode)
if pcode in pcode_dict:
translation.append(pcode_dict[pcode])
return translation
def remove_period_from_seq_and_translate(list_seq, translations, dcode_dict, pcode_dict):
if '<PSTART>' in list_seq:
pstart_idx = list_seq.index('<PSTART>')
else:
pstart_idx = len(list_seq) - 1
translation = translate_dpcode_seq(list_seq, pstart_idx, dcode_dict, pcode_dict)
translations.append(translation)
#for seq in cleaned_datasetseq:
# my_translations.append(seq[1:-1])
# remove_period_from_seq_and_translate(seq, my_translations, icd9_dcode_dict, icd9_pcode_dict)
if __name__ == '__main__':
feedback_db = "user_feedback"
engine = create_engine(DATABASE_URL, connect_args={'sslmode':'require'}) # uncomment along with line 75 for deployment
#engine = create_engine('sqlite:///testDB.db') # comment when done with local changes
mongo_db = client[feedback_db] # user_feedback DB that all feedback is sent to
mongo_feedback_collection = mongo_db[session_id] # each person's session ID is used to create a collection inside the feedback DB
# create state tables of session
engine.execute("CREATE TABLE IF NOT EXISTS %s (size text)" % (session_id))
len_table = engine.execute("SELECT COUNT(*) FROM %s" % (session_id))
len_table = len_table.first()[0]
if len_table == 0:
engine.execute("INSERT INTO %s (size) VALUES ('1')" % (session_id))
# can now create pages
page = st.sidebar.selectbox("Select page:", ("About", "NTDB-GPT-2","Inference","Evaluation"))
# Import README markdown file
read_me_file_name = "README.md"
read_me_file_path = os.path.join(os.getcwd(), read_me_file_name)
read_me_file = open(read_me_file_path)
read_me = read_me_file.read()
if page == "About":
st.markdown(read_me)
elif page == "NTDB-GPT-2":
st.header("Work in progress")
elif page == "Inference":
st.subheader("Here are some sample sequences to generate from:")
#for seq in cleaned_datasetseq[3:]:
#st.text(' '.join(seq))
query_str = st.text_input("Enter a sequence stub starting with <START> ECODE ...")
if st.button("Generate"):
output = query(query_str)
output_tt = output[0].values()
output_tt = list(output_tt)
output_tt = output_tt[0].split(' ')
translations = []
#translations.append(output_tt[1:-1])
remove_period_from_seq_and_translate(output_tt, translations, icd9_dcode_dict, icd9_pcode_dict)
st.write(output[0])
st.write(translations[-1])
elif page == "Evaluation":
clin_loe = st.selectbox('What is your clinical level of education?',
('Medical Student', 'Resident/Fellow', 'Attending'))
st.subheader("Given the following stem consisting of an presenting injury (START) and diagnosis (DXS) do the following procedures (PRS) make clinical sense? Rate the 3 sequences below! (If no procedures are listed, is lack of surgical intervention a valid outcome?)\n")
col_1, col_2 = st.columns(2)
#seq_samples = random.sample(range(dataset_len), 3)
#for idx, seq in enumerate(st.session_state.seq_samples):
# st.session_state.seq_samples[idx] += 1
#st.write(seq_samples)
#idxs = []
rated_seqs = []
for idx, seq in enumerate(st.session_state.seqs[:5],1):
#idxs.append(idx)
#list_seq = my_translations[idx]
#list_trans = my_translations[idx+1]
rated_seqs.append(str(st.session_state.labels[idx-1])+"_seq_:"+str(seq)+"_realistic_:")
#rated_trans.append(list_trans)
#input_seq = list_seq[:3]
#input_seq = ' '.join(input_seq)
with st.container():
with col_1:
st.header(f"Sequence {idx}:")
st.text(seq) # exclude start/end in output
#print(str(list_seq))
#with col_2:
# st.text(st.session_state.seqs[:5])
# seq_1_plaus = st.checkbox("Realistic?")
# seq_2_plaus = st.checkbox("Realistic?")
# seq_3_plaus = st.checkbox("Realistic?")
#st.header(f"Translation {idx}:")
#st.write(list_trans)
form = st.form("checkboxes", clear_on_submit = True)
with form:
seq_1_plaus = st.checkbox(f"Is sequence 1 realistic?")
seq_2_plaus = st.checkbox(f"Is sequence 2 realistic?")
seq_3_plaus = st.checkbox(f"Is sequence 3 realistic?")
seq_4_plaus = st.checkbox(f"Is sequence 4 realistic?")
seq_5_plaus = st.checkbox(f"Is sequence 5 realistic?")
submit = form.form_submit_button("Submit your answers")
#write_state("size", data, engine, session_id)
#size = int(read_state("size", engine, session_id))
if submit:
rated_seqs[0] += str(seq_1_plaus)
rated_seqs[1] += str(seq_2_plaus)
rated_seqs[2] += str(seq_3_plaus)
rated_seqs[3] += str(seq_4_plaus)
rated_seqs[4] += str(seq_5_plaus)
st.session_state.mongo_data[session_id].extend(rated_seqs)
data = {'clin_LOE':clin_loe, 'rated_seqs':st.session_state.mongo_data[session_id]}
df = | pd.DataFrame(data) | pandas.DataFrame |
import argparse
import datetime
import os
import shutil
import unittest
from unittest import mock
import pandas
from matrix.common import date
from matrix.common.request.request_tracker import Subtask
from matrix.common.query.cell_query_results_reader import CellQueryResultsReader
from matrix.common.query.feature_query_results_reader import FeatureQueryResultsReader
from matrix.docker.matrix_converter import main, MatrixConverter, SUPPORTED_FORMATS
from matrix.docker.query_runner import QueryType
class TestMatrixConverter(unittest.TestCase):
def setUp(self):
self.test_manifest = {
"columns": ["a", "b", "c"],
"part_urls": ["A", "B", "C"],
"record_count": 5
}
args = ["test_id", "test_exp_manifest", "test_cell_manifest",
"test_gene_manifest", "test_target", "loom", "."]
parser = argparse.ArgumentParser()
parser.add_argument("request_id")
parser.add_argument("expression_manifest_key")
parser.add_argument("cell_metadata_manifest_key")
parser.add_argument("gene_metadata_manifest_key")
parser.add_argument("target_path")
parser.add_argument("format", choices=SUPPORTED_FORMATS)
parser.add_argument("working_dir")
self.args = parser.parse_args(args)
self.matrix_converter = MatrixConverter(self.args)
@mock.patch("os.remove")
@mock.patch("matrix.common.request.request_tracker.RequestTracker.creation_date", new_callable=mock.PropertyMock)
@mock.patch("matrix.common.request.request_tracker.RequestTracker.complete_request")
@mock.patch("matrix.common.request.request_tracker.RequestTracker.complete_subtask_execution")
@mock.patch("matrix.docker.matrix_converter.MatrixConverter._upload_converted_matrix")
@mock.patch("matrix.docker.matrix_converter.MatrixConverter._to_loom")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_run(self,
mock_parse_manifest,
mock_to_loom,
mock_upload_converted_matrix,
mock_subtask_exec,
mock_complete_request,
mock_creation_date,
mock_os_remove):
mock_parse_manifest.return_value = self.test_manifest
mock_creation_date.return_value = date.to_string(datetime.datetime.utcnow())
mock_to_loom.return_value = "local_matrix_path"
self.matrix_converter.run()
mock_manifest_calls = [
mock.call("test_cell_manifest"),
mock.call("test_exp_manifest"),
mock.call("test_gene_manifest")
]
mock_parse_manifest.assert_has_calls(mock_manifest_calls)
mock_to_loom.assert_called_once()
mock_subtask_exec.assert_called_once_with(Subtask.CONVERTER)
mock_complete_request.assert_called_once()
mock_upload_converted_matrix.assert_called_once_with("local_matrix_path", "test_target")
@mock.patch("s3fs.S3FileSystem.open")
def test__n_slices(self, mock_open):
manifest_file_path = "tests/functional/res/cell_metadata_manifest"
with open(manifest_file_path) as f:
mock_open.return_value = f
self.matrix_converter.query_results = {
QueryType.CELL: CellQueryResultsReader("test_manifest_key")
}
self.assertEqual(self.matrix_converter._n_slices(), 8)
def test__make_directory(self):
self.assertEqual(os.path.isdir('test_target'), False)
results_dir = self.matrix_converter._make_directory()
self.assertEqual(os.path.isdir('test_target'), True)
shutil.rmtree(results_dir)
def test__zip_up_matrix_output(self):
results_dir = self.matrix_converter._make_directory()
shutil.copyfile('LICENSE', './test_target/LICENSE')
path = self.matrix_converter._zip_up_matrix_output(results_dir, ['LICENSE'])
self.assertEqual(path, './test_target.zip')
os.remove('./test_target.zip')
@mock.patch("pandas.DataFrame.to_csv")
@mock.patch("matrix.common.query.feature_query_results_reader.FeatureQueryResultsReader.load_results")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test__write_out_gene_dataframe__with_compression(self, mock_parse_manifest, mock_load_results, mock_to_csv):
self.matrix_converter.query_results = {
QueryType.FEATURE: FeatureQueryResultsReader("test_manifest_key")
}
results_dir = self.matrix_converter._make_directory()
mock_load_results.return_value = pandas.DataFrame()
results = self.matrix_converter._write_out_gene_dataframe(results_dir, 'genes.csv.gz', compression=True)
self.assertEqual(type(results).__name__, 'DataFrame')
mock_load_results.assert_called_once()
mock_to_csv.assert_called_once_with('./test_target/genes.csv.gz',
compression='gzip',
index_label='featurekey',
sep='\t')
shutil.rmtree(results_dir)
@mock.patch("pandas.DataFrame.to_csv")
@mock.patch("matrix.common.query.feature_query_results_reader.FeatureQueryResultsReader.load_results")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test__write_out_gene_dataframe__without_compression(self, mock_parse_manifest, mock_load_results, mock_to_csv):
self.matrix_converter.query_results = {
QueryType.FEATURE: FeatureQueryResultsReader("test_manifest_key")
}
results_dir = self.matrix_converter._make_directory()
mock_load_results.return_value = pandas.DataFrame()
results = self.matrix_converter._write_out_gene_dataframe(results_dir, 'genes.csv', compression=False)
self.assertEqual(type(results).__name__, 'DataFrame')
mock_load_results.assert_called_once()
mock_to_csv.assert_called_once_with('./test_target/genes.csv', index_label='featurekey')
shutil.rmtree(results_dir)
@mock.patch("pandas.DataFrame.reindex")
@mock.patch("pandas.DataFrame.to_csv")
def test__write_out_cell_dataframe__with_compression(self, mock_to_csv, mock_reindex):
mock_reindex.return_value = pandas.DataFrame()
results_dir = './test_target'
results = self.matrix_converter._write_out_cell_dataframe(results_dir,
'cells.csv.gz',
pandas.DataFrame(),
[],
compression=True)
self.assertEqual(type(results).__name__, 'DataFrame')
mock_reindex.assert_called_once()
mock_to_csv.assert_called_once_with('./test_target/cells.csv.gz',
compression='gzip',
index_label='cellkey',
sep='\t')
@mock.patch("pandas.DataFrame.reindex")
@mock.patch("pandas.DataFrame.to_csv")
def test__write_out_cell_dataframe__without_compression(self, mock_to_csv, mock_reindex):
mock_reindex.return_value = | pandas.DataFrame() | pandas.DataFrame |
from scipy.signal import butter, lfilter, resample, firwin, decimate
from sklearn.decomposition import FastICA, PCA
from sklearn import preprocessing
import numpy as np
import pandas as np
import matplotlib.pyplot as plt
import scipy
import pandas as pd
class SpectrogramImage:
"""
Plot spectrogram for each channel and convert it to numpy image array.
"""
def __init__(self, size=(224, 224, 4)):
self.size = size
def get_name(self):
return 'img-spec-{}'.format(self.size)
def drop_zeros(self, df):
return df[(df.T != 0).any()]
def apply(self, data):
data = pd.DataFrame(data.T)
data = self.drop_zeros(data)
channels = []
for col in data.columns:
plt.ioff()
_, _, _, _ = plt.specgram(data[col], NFFT=2048, Fs=240000/600, noverlap=int((240000/600)*0.005), cmap=plt.cm.spectral)
plt.axis('off')
plt.savefig('spec.png', bbox_inches='tight', pad_inches=0)
plt.close()
im = scipy.misc.imread('spec.png', mode='RGB')
im = scipy.misc.imresize(im, (224, 224, 3))
channels.append(im)
return channels
class UnitScale:
"""
Scale across the last axis.
"""
def get_name(self):
return 'unit-scale'
def apply(self, data):
return preprocessing.scale(data, axis=data.ndim - 1)
class UnitScaleFeat:
"""
Scale across the first axis, i.e. scale each feature.
"""
def get_name(self):
return 'unit-scale-feat'
def apply(self, data):
return preprocessing.scale(data, axis=0)
class FFT:
"""
Apply Fast Fourier Transform to the last axis.
"""
def get_name(self):
return "fft"
def apply(self, data):
axis = data.ndim - 1
return np.fft.rfft(data, axis=axis)
class ICA:
"""
apply ICA experimental!
"""
def __init__(self, n_components=None):
self.n_components = n_components
def get_name(self):
if self.n_components != None:
return "ICA%d" % (self.n_components)
else:
return 'ICA'
def apply(self, data):
# apply pca to each
ica = FastICA()
data = ica.fit_transform(da)
return data
class Resample:
"""
Resample time-series data.
"""
def __init__(self, sample_rate):
self.f = sample_rate
def get_name(self):
return "resample%d" % self.f
def apply(self, data):
axis = data.ndim - 1
if data.shape[-1] > self.f:
return resample(data, self.f, axis=axis)
return data
class Magnitude:
"""
Take magnitudes of Complex data
"""
def get_name(self):
return "mag"
def apply(self, data):
return np.absolute(data)
class LPF:
"""
Low-pass filter using FIR window
"""
def __init__(self, f):
self.f = f
def get_name(self):
return 'lpf%d' % self.f
def apply(self, data):
nyq = self.f / 2.0
cutoff = min(self.f, nyq - 1)
h = firwin(numtaps=101, cutoff=cutoff, nyq=nyq)
# data[ch][dim0]
# apply filter over each channel
for j in range(len(data)):
data[j] = lfilter(h, 1.0, data[j])
return data
class Mean:
"""
extract channel means
"""
def get_name(self):
return 'mean'
def apply(self, data):
axis = data.ndim - 1
return data.mean(axis=axis)
class Abs:
"""
extract channel means
"""
def get_name(self):
return 'abs'
def apply(self, data):
return np.abs(data)
class Stats:
"""
Subtract the mean, then take (min, max, standard_deviation) for each channel.
"""
def get_name(self):
return "stats"
def apply(self, data):
# data[ch][dim]
shape = data.shape
out = np.empty((shape[0], 3))
for i in range(len(data)):
ch_data = data[i]
ch_data = data[i] - np.mean(ch_data)
outi = out[i]
outi[0] = np.std(ch_data)
outi[1] = np.min(ch_data)
outi[2] = np.max(ch_data)
return out
class Interp:
"""
Interpolate zeros max --> min * 1.0
NOTE: try different methods later
"""
def get_name(self):
return "interp"
def apply(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return data
class Log10:
"""
Apply Log10
"""
def get_name(self):
return "log10"
def apply(self, data):
# interps 0 data before taking log
indices = np.where(data <= 0)
data[indices] = np.max(data)
data[indices] = (np.min(data) * 0.1)
return np.log10(data)
class Slice:
"""
Take a slice of the data on the last axis.
e.g. Slice(1, 48) works like a normal python slice, that is 1-47 will be taken
"""
def __init__(self, start, end):
self.start = start
self.end = end
def get_name(self):
return "slice%d-%d" % (self.start, self.end)
def apply(self, data):
s = [slice(None), ] * data.ndim
s[-1] = slice(self.start, self.end)
return data[s]
class CorrelationMatrix:
"""
Calculate correlation coefficients matrix across all EEG channels.
"""
def get_name(self):
return 'corr-mat'
def apply(self, data):
return upper_right_triangle(np.corrcoef(data))
# Fix everything below here
class Eigenvalues:
"""
Take eigenvalues of a matrix, and sort them by magnitude in order to
make them useful as features (as they have no inherent order).
"""
def get_name(self):
return 'eigenvalues'
def apply(self, data):
w, v = np.linalg.eig(data)
w = np.absolute(w)
w.sort()
return w
class FreqCorrelation:
"""
Correlation in the frequency domain. First take FFT with (start, end) slice options,
then calculate correlation co-efficients on the FFT output, followed by calculating
eigenvalues on the correlation co-efficients matrix.
The output features are (fft, upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, start, end, scale_option, with_fft=False, with_corr=True, with_eigen=True):
self.start = start
self.end = end
self.scale_option = scale_option
self.with_fft = with_fft
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'freq-correlation-%d-%d-%s-%s%s' % (self.start, self.end, 'withfft' if self.with_fft else 'nofft',
self.scale_option, selection_str)
def apply(self, data):
data1 = FFT().apply(data)
data1 = Slice(self.start, self.end).apply(data1)
data1 = Magnitude().apply(data1)
data1 = Log10().apply(data1)
data2 = data1
if self.scale_option == 'usf':
data2 = UnitScaleFeat().apply(data2)
elif self.scale_option == 'us':
data2 = UnitScale().apply(data2)
data2 = CorrelationMatrix().apply(data2)
if self.with_eigen:
w = Eigenvalues().apply(data2)
out = []
if self.with_corr:
data2 = upper_right_triangle(data2)
out.append(data2)
if self.with_eigen:
out.append(w)
if self.with_fft:
data1 = data1.ravel()
out.append(data1)
for d in out:
assert d.ndim == 1
return np.concatenate(out, axis=0)
class TimeCorrelation:
"""
Correlation in the time domain. First downsample the data, then calculate correlation co-efficients
followed by calculating eigenvalues on the correlation co-efficients matrix.
The output features are (upper_right_diagonal(correlation_coefficients), eigenvalues)
Features can be selected/omitted using the constructor arguments.
"""
def __init__(self, max_hz, scale_option, with_corr=True, with_eigen=True):
self.max_hz = max_hz
self.scale_option = scale_option
self.with_corr = with_corr
self.with_eigen = with_eigen
assert scale_option in ('us', 'usf', 'none')
assert with_corr or with_eigen
def get_name(self):
selections = []
if not self.with_corr:
selections.append('nocorr')
if not self.with_eigen:
selections.append('noeig')
if len(selections) > 0:
selection_str = '-' + '-'.join(selections)
else:
selection_str = ''
return 'time-correlation-r%d-%s%s' % (self.max_hz, self.scale_option, selection_str)
def apply(self, data):
# so that correlation matrix calculation doesn't crash
for ch in data:
if | np.alltrue(ch == 0.0) | pandas.alltrue |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, | pd.Timestamp('2015-01-20') | pandas.Timestamp |
"""
Functions to create candidate data DataFrames
"""
import pandas as pd
pd.options.mode.chained_assignment = None
def create_df(dictionary):
'''
Functions that converts dictionary into pandas DataFrame
Args:
dictionary: dictionary to be converted into pandas DataFrame
Returns:
created_df: pandas DataFrame
'''
created_df = | pd.DataFrame.from_dict(dictionary, orient='columns') | pandas.DataFrame.from_dict |
from copy import deepcopy as _deepcopy
import numpy as _np
import pandas as _pd
from scipy import integrate as _integrate
from atmPy.aerosols.size_distribution import moments as _sizedist_moment_conversion
from atmPy.general import timeseries as _timeseries
from atmPy.general import vertical_profile as _vertical_profile
from atmPy.radiation.mie_scattering import bhmie as _bhmie
# import atmPy.aerosols.size_distribution.sizedistribution as _sizedistribution
from atmPy.aerosols.size_distribution import sizedistribution as _sizedistribution
import warnings as _warnings
# Todo: Docstring is wrong
# todo: This function can be sped up by breaking it apart. Then have OpticalProperties
# have properties that call the subfunction on demand
def size_dist2optical_properties(op, sd, aod=False, noOfAngles=100):
"""
!!!Tis Docstring need fixn
Calculates the extinction crossection, AOD, phase function, and asymmetry Parameter for each layer.
plotting the layer and diameter dependent extinction coefficient gives you an idea what dominates the overall AOD.
Parameters
----------
wavelength: float.
wavelength of the scattered light, unit: nm
n: float.
Index of refraction of the scattering particles
noOfAngles: int, optional.
Number of scattering angles to be calculated. This mostly effects calculations which depend on the phase
function.
Returns
-------
OpticalProperty instance
"""
# if not _np.any(sd.index_of_refraction):
# txt = 'Refractive index is not specified. Either set self.index_of_refraction or set optional parameter n.'
# raise ValueError(txt)
# if not sd.sup_optical_properties_wavelength:
# txt = 'Please provied wavelength by setting the attribute sup_optical_properties_wavelength (in nm).'
# raise AttributeError(txt)
sd.parameters4reductions._check_opt_prop_param_exist()
wavelength = sd.parameters4reductions.wavelength.value
n = sd.parameters4reductions.refractive_index.value
mie_result = sd.parameters4reductions.mie_result.value
out = {}
sdls = sd.convert2numberconcentration()
index = sdls.data.index
dist_class = type(sdls).__name__
if dist_class not in ['SizeDist','SizeDist_TS','SizeDist_LS']:
raise TypeError('this distribution class (%s) can not be converted into optical property yet!'%dist_class)
# determin if index of refraction changes or if it is constant
if isinstance(n, _pd.DataFrame):
n_multi = True
else:
n_multi = False
if not n_multi:
if isinstance(mie_result, type(None)):
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n,
noOfAngles=noOfAngles)
else:
mie = mie_result['mie']
angular_scatt_func = mie_result['angular_scatt_func']
out['mie_result'] = {'mie': mie, 'angular_scatt_func': angular_scatt_func}
if aod:
#todo: use function that does a the interpolation instead of the sum?!? I guess this can lead to errors when layers are very thick, since centers are used instea dof edges?
AOD_layer = _np.zeros((len(sdls.layercenters)))
extCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)), dtype= _np.float32)
scattCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)), dtype= _np.float32)
absCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)), dtype= _np.float32)
angular_scatt_func_effective = _pd.DataFrame()
asymmetry_parameter_LS = _np.zeros((len(sdls.data.index.values)))
#calculate optical properties for each line in the dataFrame
for i, lc in enumerate(sdls.data.index.values):
laydata = sdls.data.iloc[i].values # picking a size distribution (either a layer or a point in time)
if n_multi:
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n.iloc[i].values[0],
noOfAngles=noOfAngles)
extinction_coefficient = _get_coefficients(mie.extinction_crossection, laydata)
scattering_coefficient = _get_coefficients(mie.scattering_crossection, laydata)
absorption_coefficient = _get_coefficients(mie.absorption_crossection, laydata)
out['test.extcross'] = mie.extinction_crossection.copy()
out['test.extcoeff'] = extinction_coefficient.copy()
out['test.laydata'] = laydata
if aod:
layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
extCoeffPerLayer[i] = extinction_coefficient
scattCoeffPerLayer[i] = scattering_coefficient
absCoeffPerLayer[i] = absorption_coefficient
scattering_cross_eff = laydata * mie.scattering_crossection
pfe = (laydata * angular_scatt_func).sum(axis=1) # sum of all angular_scattering_intensities
x_2p = pfe.index.values
y_2p = pfe.values
# limit to [0,pi]
y_1p = y_2p[x_2p < _np.pi]
x_1p = x_2p[x_2p < _np.pi]
y_phase_func = y_1p * 4 * _np.pi / scattering_cross_eff.sum()
asymmetry_parameter_LS[i] = .5 * _integrate.simps(_np.cos(x_1p) * y_phase_func * _np.sin(x_1p), x_1p)
angular_scatt_func_effective[
lc] = pfe * 1e-12 * 1e6 # equivalent to extCoeffPerLayer # similar to _get_coefficients (converts everthing to meter)
if aod:
out['AOD'] = AOD_layer[~ _np.isnan(AOD_layer)].sum()
out['AOD_layer'] = _pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
extCoeff_perrow_perbin = _pd.DataFrame(extCoeffPerLayer, index=index, columns=sdls.data.columns)
scattCoeff_perrow_perbin = _pd.DataFrame(scattCoeffPerLayer, index=index, columns=sdls.data.columns)
absCoeff_perrow_perbin = _pd.DataFrame(absCoeffPerLayer, index=index, columns=sdls.data.columns)
# if dist_class == 'SizeDist_TS':
# out['extCoeff_perrow_perbin'] = timeseries.TimeSeries_2D(extCoeff_perrow_perbin)
# if dist_class == 'SizeDist':
# out['extCoeff_perrow_perbin'] = _timeseries.TimeSeries(extCoeff_perrow_perbin)
# out['scattCoeff_perrow_perbin'] = _timeseries.TimeSeries(scattCoeff_perrow_perbin)
# out['absCoeff_perrow_perbin'] = _timeseries.TimeSeries(absCoeff_perrow_perbin)
# else:
out['extCoeff_perrow_perbin'] = extCoeff_perrow_perbin
out['scattCoeff_perrow_perbin'] = scattCoeff_perrow_perbin
out['absCoeff_perrow_perbin'] = absCoeff_perrow_perbin
out['parent_type'] = dist_class
out['asymmetry_param'] = _pd.DataFrame(asymmetry_parameter_LS, index=index,
columns=['asymmetry_param'])
out['wavelength'] = wavelength
out['index_of_refraction'] = n
out['bin_centers'] = sdls.bincenters
out['bins'] = sdls.bins
out['binwidth'] = sdls.binwidth
out['distType'] = sdls.distributionType
out['angular_scatt_func'] = angular_scatt_func_effective.transpose()
### test values
out['mie_curve_ext'] = mie.extinction_crossection
out['mie_inst'] = mie
return out
def DEPRECATED_size_dist2optical_properties(sd, aod=False, noOfAngles=100):
"""
!!!Tis Docstring need fixn
Calculates the extinction crossection, AOD, phase function, and asymmetry Parameter for each layer.
plotting the layer and diameter dependent extinction coefficient gives you an idea what dominates the overall AOD.
Parameters
----------
wavelength: float.
wavelength of the scattered light, unit: nm
n: float.
Index of refraction of the scattering particles
noOfAngles: int, optional.
Number of scattering angles to be calculated. This mostly effects calculations which depend on the phase
function.
Returns
-------
OpticalProperty instance
"""
# if not _np.any(sd.index_of_refraction):
# txt = 'Refractive index is not specified. Either set self.index_of_refraction or set optional parameter n.'
# raise ValueError(txt)
# if not sd.sup_optical_properties_wavelength:
# txt = 'Please provied wavelength by setting the attribute sup_optical_properties_wavelength (in nm).'
# raise AttributeError(txt)
sd.optical_properties_settings._check()
wavelength = sd.optical_properties_settings.wavelength.value
n = sd.optical_properties_settings.refractive_index.value
out = {}
sdls = sd.convert2numberconcentration()
index = sdls.data.index
dist_class = type(sdls).__name__
if dist_class not in ['SizeDist','SizeDist_TS','SizeDist_LS']:
raise TypeError('this distribution class (%s) can not be converted into optical property yet!'%dist_class)
# determin if index of refraction changes or if it is constant
if isinstance(n, _pd.DataFrame):
n_multi = True
else:
n_multi = False
if not n_multi:
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n,
noOfAngles=noOfAngles)
if aod:
#todo: use function that does a the interpolation instead of the sum?!? I guess this can lead to errors when layers are very thick, since centers are used instea dof edges?
AOD_layer = _np.zeros((len(sdls.layercenters)))
extCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
scattCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
absCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
angular_scatt_func_effective = _pd.DataFrame()
asymmetry_parameter_LS = _np.zeros((len(sdls.data.index.values)))
#calculate optical properties for each line in the dataFrame
for i, lc in enumerate(sdls.data.index.values):
laydata = sdls.data.iloc[i].values # picking a size distribution (either a layer or a point in time)
if n_multi:
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n.iloc[i].values[0],
noOfAngles=noOfAngles)
extinction_coefficient = _get_coefficients(mie.extinction_crossection, laydata)
scattering_coefficient = _get_coefficients(mie.scattering_crossection, laydata)
absorption_coefficient = _get_coefficients(mie.absorption_crossection, laydata)
if aod:
layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
extCoeffPerLayer[i] = extinction_coefficient
scattCoeffPerLayer[i] = scattering_coefficient
absCoeffPerLayer[i] = absorption_coefficient
scattering_cross_eff = laydata * mie.scattering_crossection
pfe = (laydata * angular_scatt_func).sum(axis=1) # sum of all angular_scattering_intensities
x_2p = pfe.index.values
y_2p = pfe.values
# limit to [0,pi]
y_1p = y_2p[x_2p < _np.pi]
x_1p = x_2p[x_2p < _np.pi]
y_phase_func = y_1p * 4 * _np.pi / scattering_cross_eff.sum()
asymmetry_parameter_LS[i] = .5 * _integrate.simps(_np.cos(x_1p) * y_phase_func * _np.sin(x_1p), x_1p)
angular_scatt_func_effective[
lc] = pfe * 1e-12 * 1e6 # equivalent to extCoeffPerLayer # similar to _get_coefficients (converts everthing to meter)
if aod:
out['AOD'] = AOD_layer[~ _np.isnan(AOD_layer)].sum()
out['AOD_layer'] = _pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
extCoeff_perrow_perbin = _pd.DataFrame(extCoeffPerLayer, index=index, columns=sdls.data.columns)
scattCoeff_perrow_perbin = _pd.DataFrame(scattCoeffPerLayer, index=index, columns=sdls.data.columns)
absCoeff_perrow_perbin = _pd.DataFrame(absCoeffPerLayer, index=index, columns=sdls.data.columns)
# if dist_class == 'SizeDist_TS':
# out['extCoeff_perrow_perbin'] = timeseries.TimeSeries_2D(extCoeff_perrow_perbin)
if dist_class == 'SizeDist':
out['extCoeff_perrow_perbin'] = _timeseries.TimeSeries(extCoeff_perrow_perbin)
out['scattCoeff_perrow_perbin'] = _timeseries.TimeSeries(scattCoeff_perrow_perbin)
out['absCoeff_perrow_perbin'] = _timeseries.TimeSeries(absCoeff_perrow_perbin)
else:
out['extCoeff_perrow_perbin'] = extCoeff_perrow_perbin
out['scattCoeff_perrow_perbin'] = scattCoeff_perrow_perbin
out['absCoeff_perrow_perbin'] = absCoeff_perrow_perbin
# extCoeff_perrow = pd.DataFrame(extCoeff_perrow_perbin.sum(axis=1), columns=['ext_coeff'])
# if index.dtype == '<M8[ns]':
# out['extCoeff_perrow'] = timeseries.TimeSeries(extCoeff_perrow)
# else:
# out['extCoeff_perrow'] = extCoeff_perrow
out['parent_type'] = dist_class
out['asymmetry_param'] = _pd.DataFrame(asymmetry_parameter_LS, index=index,
columns=['asymmetry_param'])
# out['asymmetry_param_alt'] = pd.DataFrame(asymmetry_parameter_LS_alt, index=sdls.layercenters, columns = ['asymmetry_param_alt'])
# out['OptPropInstance']= OpticalProperties(out, self.bins)
out['wavelength'] = wavelength
out['index_of_refraction'] = n
out['bin_centers'] = sdls.bincenters
out['bins'] = sdls.bins
out['binwidth'] = sdls.binwidth
out['distType'] = sdls.distributionType
out['angular_scatt_func'] = angular_scatt_func_effective
# opt_properties = OpticalProperties(out, self.bins)
# opt_properties.wavelength = wavelength
# opt_properties.index_of_refractio = n
# opt_properties.angular_scatt_func = angular_scatt_func_effective # This is the formaer phase_fct, but since it is the angular scattering intensity, i changed the name
# opt_properties.parent_dist_LS = self
if dist_class == 'SizeDist_TS':
return OpticalProperties_TS(out, parent = sd)
elif dist_class == 'SizeDist_LS':
return OpticalProperties_VP(out, parent= sd)
return out
def hemispheric_backscattering(osf_df):
"""scattering into backwards hemisphere from angulare scattering intensity
Parameters
----------
osf_df: pandas DataFrame
This contains the angulare scattering intensity with column names giving the
angles in radiant
Returns
-------
pandas data frame with the scattering intensities
"""
import pdb
# pdb.set_trace()
def ang_scat_funk2bs(index,ol):
x = index #_np.deg2rad(index)
f = ol
# pdb.set_trace()
# my phase function goes all the way to two py
f = f[x < _np.pi]
x = x[x < _np.pi]
f_b = f[x >= _np.pi / 2.]
x_b = x[x >= _np.pi / 2.]
# pdb.set_trace()
res_b = 2 * _np.pi * _integrate.simps(f_b * _np.sin(x_b), x_b)
return res_b
bs = _np.zeros(osf_df.shape[0])
index = osf_df.columns
for i in range(osf_df.shape[0]):
ol = osf_df.iloc[i,:].values
bs[i] = ang_scat_funk2bs(index,ol)
bs = _pd.DataFrame(bs, index = osf_df.index)
return bs
def hemispheric_forwardscattering(osf_df):
"""scattering into forward hemisphere from angulare scattering intensity
Parameters
----------
osf_df: pandas DataFrame
This contains the angulare scattering intensity with column names giving the
angles in radiant
Returns
-------
pandas data frame with the scattering intensities
"""
def ang_scat_funk2fs(index,ol):
x = index #ol.index.values
f = ol
# my phase function goes all the way to two py
f = f[x < _np.pi]
x = x[x < _np.pi]
f_f = f[x < _np.pi / 2.]
x_f = x[x < _np.pi / 2.]
res_f = 2 * _np.pi * _integrate.simps(f_f * _np.sin(x_f), x_f)
return res_f
fs = _np.zeros(osf_df.shape[0])
index = osf_df.columns
for i in range(osf_df.shape[0]):
ol = osf_df.iloc[i,:].values
fs[i] = ang_scat_funk2fs(index,ol)
fs = _pd.DataFrame(fs, index = osf_df.index)
return fs
#Todo: bins are redundand
# Todo: some functions should be switched of
# todo: right now this for layer and time series, not ok
class OpticalProperties(object):
def __init__(self, parent):
self._parent_sizedist = parent
self.parameters = _sizedistribution._Parameters4Reductions_opt_prop(parent)
# self.asymmetry_param = data['asymmetry_param']
self._extinction_coeff = None
self._scattering_coeff = None
self._absorption_coeff = None
self._mie_result = None
self._hemispheric_backscattering = None
# self._hemispheric_backscattering_ratio = None
self._hemispheric_forwardscattering = None
# self._hemispheric_forwardscattering_ratio = None
self._optical_porperties_pv = None
self.mean_effective_diameter = None
self._parent_type = type(parent).__name__
self.bins = parent.bins
self.binwidth = parent.binwidth
self.distributionType = parent.distributionType
# self._data_period = self.parent_sizedist._data_period
@property
def extinction_coeff_per_bin(self):
self._optical_porperties
return self._extinction_coeff_per_bin
@property
def scattering_coeff_per_bin(self):
self._optical_porperties
return self._scattering_coeff_per_bin
@property
def absorption_coeff_per_bin(self):
self._optical_porperties
return self._absorption_coeff_per_bin
@property
def angular_scatt_func(self):
self._optical_porperties
return self._angular_scatt_func
@property
def _optical_porperties(self):
if not self._optical_porperties_pv:
data = size_dist2optical_properties(self, self._parent_sizedist)
self._optical_porperties_pv = data
####
self._extinction_coeff_per_bin = data['extCoeff_perrow_perbin']
self._extinction_coeff = _pd.DataFrame(self._extinction_coeff_per_bin.sum(axis=1), columns=['ext_coeff_m^1'])
####
self._scattering_coeff_per_bin = data['scattCoeff_perrow_perbin']
self._scattering_coeff = _pd.DataFrame(self._scattering_coeff_per_bin.sum(axis=1), columns=['scatt_coeff_m^1'])
#####
self._absorption_coeff_per_bin = data['absCoeff_perrow_perbin']
self._absorption_coeff = _pd.DataFrame(self._absorption_coeff_per_bin.sum(axis=1), columns=['abs_coeff_m^1'])
####
self._angular_scatt_func = data['angular_scatt_func']
####
self.parameters.mie_result = data['mie_result']
return self._optical_porperties_pv
@property
def absorption_coeff(self):
self._optical_porperties
return self._absorption_coeff
@property
def extinction_coeff(self):
self._optical_porperties
return self._extinction_coeff
@property
def scattering_coeff(self):
self._optical_porperties
return self._scattering_coeff
@property
def hemispheric_backscattering(self):
if not _np.any(self._hemispheric_backscattering):
self._hemispheric_backscattering = hemispheric_backscattering(self.angular_scatt_func)
self._hemispheric_backscattering_ratio = _pd.DataFrame(
self._hemispheric_backscattering.iloc[:, 0] / self._scattering_coeff.iloc[:, 0],
columns=['hem_back_scatt_ratio'])
return self._hemispheric_backscattering
@property
def hemispheric_backscattering_ratio(self):
self.hemispheric_backscattering
# if not _np.any(self._hemispheric_backscattering_ratio):
# self._hemispheric_backscattering_ratio = _pd.DataFrame(self.hemispheric_backscattering.iloc[:,0] / self._scattering_coeff.iloc[:,0], columns=['hem_beck_scatt_ratio'])
return self._hemispheric_backscattering_ratio
@property
def hemispheric_forwardscattering(self):
if not _np.any(self._hemispheric_forwardscattering):
self._hemispheric_forwardscattering = hemispheric_forwardscattering(self.angular_scatt_func)
self._hemispheric_forwardscattering_ratio = _pd.DataFrame(self._hemispheric_forwardscattering.iloc[:, 0] / self._scattering_coeff.iloc[:, 0],
columns=['hem_forward_scatt_ratio'])
return self._hemispheric_forwardscattering
@property
def hemispheric_forwardscattering_ratio(self):
self.hemispheric_forwardscattering
# if not _np.any(self._hemispheric_forwardscattering_ratio):
# self._hemispheric_forwardscattering_ratio = self.hemispheric_forwardscattering / self.scattering_coeff
return self._hemispheric_forwardscattering_ratio
def convert_between_moments(self, moment, verbose = False):
return _sizedist_moment_conversion.convert(self,moment, verbose = verbose)
def copy(self):
return _deepcopy(self)
#Todo: bins are redundand
# Todo: some functions should be switched of
# todo: right now this for layer and time series, not ok
class DEPRECATEDOpticalProperties(object):
def __init__(self, data, parent = None):
self.parent_sizedist = parent
self.data_orig = data
self.wavelength = data['wavelength']
self.index_of_refraction = data['index_of_refraction']
self.extinction_coeff_per_bin = data['extCoeff_perrow_perbin']
self.scattering_coeff_per_bin = data['scattCoeff_perrow_perbin']
self.absorption_coeff_per_bin = data['absCoeff_perrow_perbin']
self.angular_scatt_func = data['angular_scatt_func']
# self.asymmetry_param = data['asymmetry_param']
self.__extinction_coeff_sum_along_d = None
self.__scattering_coeff_sum_along_d = None
self.__absorption_coeff_sum_along_d = None
self.mean_effective_diameter = None
self._parent_type = data['parent_type']
self.bins = data['bins']
self.binwidth = data['binwidth']
self.distributionType = data['distType']
# self._data_period = self.parent_sizedist._data_period
# @property
# def mean_effective_diameter(self):
# if not self.__mean_effective_diameter:
# # todo: remove
# @property
# def extinction_coeff_sum_along_d(self):
# _warnings.warn('extinction_coeff_sum_along_d is deprecated and will be removed in future versions. Use extingction_coeff instead')
# if not _np.any(self.__extinction_coeff_sum_along_d):
# data = self.extinction_coeff_per_bin.data.sum(axis = 1)
# df = _pd.DataFrame()
# df['ext_coeff_m^1'] = data
# if self._parent_type == 'SizeDist_TS':
# self.__extinction_coeff_sum_along_d = _timeseries.TimeSeries(df)
# elif self._parent_type == 'SizeDist':
# self.__extinction_coeff_sum_along_d = df
# else:
# raise TypeError('not possible for this distribution type')
# self.__extinction_coeff_sum_along_d._data_period = self._data_period
# return self.__extinction_coeff_sum_along_d
#
# # todo: remove
# @extinction_coeff_sum_along_d.setter
# def extinction_coeff_sum_along_d(self, data):
# self.__extinction_coeff_sum_along_d = data
@property
def extinction_coeff(self):
if not _np.any(self.__extinction_coeff_sum_along_d):
data = self.extinction_coeff_per_bin.data.sum(axis=1)
df = _pd.DataFrame()
df['ext_coeff_m^1'] = data
if self._parent_type == 'SizeDist_TS':
self.__extinction_coeff_sum_along_d = _timeseries.TimeSeries(df)
self.__extinction_coeff_sum_along_d._data_period = self._data_period
elif self._parent_type == 'SizeDist_LS':
self.__extinction_coeff_sum_along_d = _vertical_profile.VerticalProfile(df)
elif self._parent_type == 'SizeDist':
self.__extinction_coeff_sum_along_d = df
else:
raise TypeError('not possible for this distribution type')
return self.__extinction_coeff_sum_along_d
@extinction_coeff.setter
def extinction_coeff(self, data):
self.__extinction_coeff_sum_along_d = data
@property
def scattering_coeff(self):
if not _np.any(self.__scattering_coeff_sum_along_d):
data = self.scattering_coeff_per_bin.data.sum(axis=1)
df = _pd.DataFrame()
df['scatt_coeff_m^1'] = data
if self._parent_type == 'SizeDist_TS':
self.__scattering_coeff_sum_along_d = _timeseries.TimeSeries(df)
elif self._parent_type == 'SizeDist':
self.__scattering_coeff_sum_along_d = df
else:
raise TypeError('not possible for this distribution type')
self.__scattering_coeff_sum_along_d._data_period = self._data_period
return self.__scattering_coeff_sum_along_d
@scattering_coeff.setter
def scattering_coeff(self, data):
self.__scattering_coeff_sum_along_d = data
@property
def absorption_coeff(self):
if not _np.any(self.__absorption_coeff_sum_along_d):
data = self.absorption_coeff_per_bin.data.sum(axis=1)
df = _pd.DataFrame()
df['abs_coeff_m^1'] = data
if self._parent_type == 'SizeDist_TS':
self.__absorption_coeff_sum_along_d = _timeseries.TimeSeries(df)
elif self._parent_type == 'SizeDist':
self.__absorption_coeff_sum_along_d = df
else:
raise TypeError('not possible for this distribution type')
self.__absorption_coeff_sum_along_d._data_period = self._data_period
return self.__absorption_coeff_sum_along_d
@absorption_coeff.setter
def absorption_coeff(self, data):
self.__absorption_coeff_sum_along_d = data
@property
def hemispheric_backscattering(self):
if not self.__hemispheric_backscattering:
self.__hemispheric_backscattering = hemispheric_backscattering(self.angular_scatt_func)
return self.__hemispheric_backscattering
@property
def hemispheric_forwardscattering(self):
if not self.__hemispheric_forwardscattering:
self.__hemispheric_forwardscattering = hemispheric_forwardscattering(self.angular_scatt_func)
return self.__hemispheric_forwardscattering
@property
def hemispheric_backscattering_ratio(self):
if not self.__hemispheric_backscattering_ratio:
self.__hemispheric_backscattering_ratio = self.hemispheric_backscattering / self.scattering_coeff
return self.__hemispheric_backscattering_ratio
@property
def hemispheric_forwardscattering_ratio(self):
if not self.hemispheric_forwardscattering_ratio:
self.__hemispheric_forwardscattering_ratio = self.hemispheric_forwardscattering / self.scattering_coeff
return self.__hemispheric_forwardscattering_ratio
def convert_between_moments(self, moment, verbose = False):
return _sizedist_moment_conversion.convert(self,moment, verbose = verbose)
def copy(self):
return _deepcopy(self)
class OpticalProperties_TS(OpticalProperties):
@property
def hemispheric_forwardscattering(self):
super().hemispheric_forwardscattering
return _timeseries.TimeSeries(self._hemispheric_forwardscattering, sampling_period = self._parent_sizedist._data_period)
@property
def hemispheric_backscattering(self):
super().hemispheric_backscattering
return _timeseries.TimeSeries(self._hemispheric_backscattering, sampling_period = self._parent_sizedist._data_period)
@property
def hemispheric_backscattering_ratio(self):
self.hemispheric_backscattering
return _timeseries.TimeSeries(self._hemispheric_backscattering_ratio, sampling_period = self._parent_sizedist._data_period)
@property
def hemispheric_forwardscattering_ratio(self):
self.hemispheric_forwardscattering
return _timeseries.TimeSeries(self._hemispheric_forwardscattering_ratio, sampling_period = self._parent_sizedist._data_period)
@property
def absorption_coeff(self):
self._optical_porperties
return _timeseries.TimeSeries(self._absorption_coeff, sampling_period = self._parent_sizedist._data_period)
@property
def extinction_coeff(self):
self._optical_porperties
return _timeseries.TimeSeries(self._extinction_coeff, sampling_period = self._parent_sizedist._data_period)
@property
def scattering_coeff(self):
self._optical_porperties
return _timeseries.TimeSeries(self._scattering_coeff, sampling_period = self._parent_sizedist._data_period)
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.extinction_coeff_per_bin = _timeseries.TimeSeries_2D(self.extinction_coeff_per_bin)
# self.extinction_coeff_per_bin._data_period = self.parent_sizedist._data_period
#
# self.scattering_coeff_per_bin = _timeseries.TimeSeries_2D(self.scattering_coeff_per_bin)
# self.scattering_coeff_per_bin._data_period = self.parent_sizedist._data_period
#
# self.absorption_coeff_per_bin = _timeseries.TimeSeries_2D(self.absorption_coeff_per_bin)
# self.absorption_coeff_per_bin._data_period = self.parent_sizedist._data_period
#
# self.angular_scatt_func = _timeseries.TimeSeries_2D(self.angular_scatt_func.transpose())
# self.angular_scatt_func._data_period = self.parent_sizedist._data_period
#
# self.__hemispheric_forwardscattering = None
# self.__hemispheric_backscattering = None
# self.__hemispheric_backscattering_ratio = None
# self.__hemispheric_forwardscattering_ratio = None
# self._data_period = self.parent_sizedist._data_period
#
#
#
# @property
# def hemispheric_backscattering(self):
# if not self.__hemispheric_backscattering:
# out = hemispheric_backscattering(self.angular_scatt_func.data)
# out = _timeseries.TimeSeries(out)
# out._data_period = self.angular_scatt_func._data_period
# self.__hemispheric_backscattering = out
# return self.__hemispheric_backscattering
#
# @hemispheric_backscattering.setter
# def hemispheric_backscattering(self,value):
# self.__hemispheric_backscattering = value
#
# @property
# def hemispheric_forwardscattering(self):
# if not self.__hemispheric_forwardscattering:
# out = hemispheric_forwardscattering(self.angular_scatt_func.data)
# out = _timeseries.TimeSeries(out)
# out._data_period = self.angular_scatt_func._data_period
# self.__hemispheric_forwardscattering = out
# return self.__hemispheric_forwardscattering
#
#
# @hemispheric_forwardscattering.setter
# def hemispheric_forwardscattering(self, value):
# self.__hemispheric_forwardscattering = value
#
# @property
# def hemispheric_backscattering_ratio(self):
# """ratio between backscattering and overall scattering"""
# if not self.__hemispheric_backscattering_ratio:
# # self.__hemispheric_backscattering_ratio = self.hemispheric_backscattering / self.extinction_coeff
# self.__hemispheric_backscattering_ratio = self.hemispheric_backscattering / self.scattering_coeff
# return self.__hemispheric_backscattering_ratio
#
# @property
# def hemispheric_forwardscattering_ratio(self):
# """ratio between forwardscattering and over scattering"""
# if not self.__hemispheric_forwardscattering_ratio:
# self.__hemispheric_forwardscattering_ratio = self.hemispheric_forwardscattering / self.scattering_coeff
# return self.__hemispheric_forwardscattering_ratio
class OpticalProperties_VP(OpticalProperties):
@property
def hemispheric_forwardscattering(self):
super().hemispheric_forwardscattering
return _vertical_profile.VerticalProfile(self._hemispheric_forwardscattering)
@property
def hemispheric_backscattering(self):
super().hemispheric_backscattering
return _vertical_profile.VerticalProfile(self._hemispheric_backscattering)
@property
def hemispheric_backscattering_ratio(self):
self.hemispheric_backscattering
return _vertical_profile.VerticalProfile(self._hemispheric_backscattering_ratio)
@property
def hemispheric_forwardscattering_ratio(self):
self.hemispheric_forwardscattering
return _vertical_profile.VerticalProfile(self._hemispheric_forwardscattering_ratio)
@property
def absorption_coeff(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._absorption_coeff)
@property
def extinction_coeff(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._extinction_coeff)
@property
def scattering_coeff(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._scattering_coeff)
@property
def _optical_porperties(self):
if not self._optical_porperties_pv:
super()._optical_porperties
layerthickness = self._parent_sizedist.layerbounderies[:, 1] - self._parent_sizedist.layerbounderies[:, 0]
aod_per_bin_per_layer = self._parent_sizedist.optical_properties.extinction_coeff_per_bin.multiply(layerthickness, axis=0)
aod_per_layer = _pd.DataFrame(aod_per_bin_per_layer.sum(axis=1), columns=['aod_per_layer'])
self._aod = aod_per_layer.values.sum()
aod_cumulative = aod_per_layer.iloc[::-1].cumsum()
aod_cumulative.rename(columns={'aod_per_layer': 'aod'}, inplace=True)
self._aod_cumulative = aod_cumulative
return self._optical_porperties_pv
@property
def aod(self):
self._optical_porperties
return self._aod
@property
def aod_cumulative(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._aod_cumulative)
class DEPRECATED_OpticalProperties_VP(OpticalProperties):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.extinction_coeff_per_bin = _vertical_profile.VerticalProfile_2D(self.extinction_coeff_per_bin)
self.aerosol_optical_depth_cumulative_VP = _vertical_profile.VerticalProfile(self._data_dict['AOD_cum'])
self.asymmetry_param_VP = _vertical_profile.VerticalProfile(self._data_dict['asymmetry_param'])
self.aerosol_optical_depth_cumulative = self._data_dict['AOD']
class ExtinctionCoeffVerticlProfile(_vertical_profile.VerticalProfile):
def __init__(self, ext, parent, wavelength, index_of_refraction):
super(ExtinctionCoeffVerticlProfile, self).__init__(ext)
self.parent = parent
self.wavelength = wavelength
self.index_of_refraction = index_of_refraction
def plot(self, *args, **kwargs):
a = super(ExtinctionCoeffVerticlProfile, self).plot(*args, **kwargs)
a.set_xlabel('Extinction coefficient (m$^{-1}$)')
return a
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
Returns
panda DataTable with the diameters as the index and the mie_scattering results in the different collumns
total_extinction_coefficient: this takes the sum of all particles crossections of the particular diameter in a qubic
meter. This is in principle the AOD of an L
"""
diam = _np.asarray(diam)
extinction_efficiency = _np.zeros(diam.shape)
scattering_efficiency = _np.zeros(diam.shape)
absorption_efficiency = _np.zeros(diam.shape)
extinction_crossection = _np.zeros(diam.shape)
scattering_crossection = _np.zeros(diam.shape)
absorption_crossection = _np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = _pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * _np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = _bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = _pd.DataFrame(index=diam)
out['extinction_efficiency'] = _pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = _pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = _pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = | _pd.Series(extinction_crossection, index=diam) | pandas.Series |
from io import StringIO
from pathlib import Path
import pytest
import pandas as pd
from pandas import DataFrame, read_json
import pandas._testing as tm
from pandas.io.json._json import JsonReader
@pytest.fixture
def lines_json_df():
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
return df.to_json(lines=True, orient="records")
def test_read_jsonl():
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = | DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, median_absolute_error
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import pickle
chicago_bike_data_Q1= pd.read_csv('datasets/Divvy_Trips_2018_Q1.csv')
chicago_bike_data_Q2= pd.read_csv('datasets/Divvy_Trips_2018_Q2.csv')
chicago_bike_data_Q3= pd.read_csv('datasets/Divvy_Trips_2018_Q3.csv')
chicago_bike_data_Q4= pd.read_csv('datasets/Divvy_Trips_2018_Q4.csv')
chicago_bike_data_Q1 = chicago_bike_data_Q1.rename(columns={"03 - Rental Start Station Name":"from_station_name",
"02 - Rental End Station Name":"to_station_name","01 - Rental Details Rental ID":"trip_id"})
data_groupby_day_out_Q1 = pd.DataFrame(chicago_bike_data_Q1.groupby(['from_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_in_Q1 = pd.DataFrame(chicago_bike_data_Q1.groupby(['to_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_out_Q1= data_groupby_day_out_Q1.rename(columns={"trip_id": "Number Of Outgoing Trips",
"from_station_name":"Station Name"})
data_groupby_day_in_Q1 = data_groupby_day_in_Q1.rename(columns={"trip_id": "Number Of Incoming Trips",
"to_station_name":"Station Name"})
new_df_Q1=pd.merge(data_groupby_day_out_Q1,data_groupby_day_in_Q1,on=['Station Name'],how='outer')
data_groupby_day_out_Q2 = pd.DataFrame(chicago_bike_data_Q2.groupby(['from_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_in_Q2 = pd.DataFrame(chicago_bike_data_Q2.groupby(['to_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_out_Q2= data_groupby_day_out_Q2.rename(columns={"trip_id": "Number Of Outgoing Trips",
"from_station_name":"Station Name"})
data_groupby_day_in_Q2 = data_groupby_day_in_Q2.rename(columns={"trip_id": "Number Of Incoming Trips",
"to_station_name":"Station Name"})
new_df_Q2=pd.merge(data_groupby_day_out_Q2,data_groupby_day_in_Q2,on=['Station Name'],how='outer')
data_groupby_day_out_Q3 = pd.DataFrame(chicago_bike_data_Q3.groupby(['from_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_in_Q3 = pd.DataFrame(chicago_bike_data_Q3.groupby(['to_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_out_Q3= data_groupby_day_out_Q3.rename(columns={"trip_id": "Number Of Outgoing Trips",
"from_station_name":"Station Name"})
data_groupby_day_in_Q3 = data_groupby_day_in_Q3.rename(columns={"trip_id": "Number Of Incoming Trips",
"to_station_name":"Station Name"})
new_df_Q3=pd.merge(data_groupby_day_out_Q3,data_groupby_day_in_Q3,on=['Station Name'],how='outer')
data_groupby_day_out_Q4 = pd.DataFrame(chicago_bike_data_Q4.groupby(['from_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_in_Q4 = pd.DataFrame(chicago_bike_data_Q4.groupby(['to_station_name'])['trip_id'].count()).reset_index()
data_groupby_day_out_Q4= data_groupby_day_out_Q4.rename(columns={"trip_id": "Number Of Outgoing Trips",
"from_station_name":"Station Name"})
data_groupby_day_in_Q4 = data_groupby_day_in_Q4.rename(columns={"trip_id": "Number Of Incoming Trips",
"to_station_name":"Station Name"})
new_df_Q4=pd.merge(data_groupby_day_out_Q4,data_groupby_day_in_Q4,on=['Station Name'],how='outer')
new_df = pd.concat([new_df_Q1,new_df_Q2,new_df_Q3,new_df_Q4])
top_rentals = new_df.sort_values(['Number Of Outgoing Trips'], ascending=False).iloc[0:10,0:2].reset_index()
chicago_bike_data_Q1['day'] = [int(str(starttime).split(" ")[0].split("-")[2]) for starttime in chicago_bike_data_Q1['01 - Rental Details Local Start Time']]
chicago_bike_data_Q1['month'] = [int(str(starttime).split(" ")[0].split("-")[1]) for starttime in chicago_bike_data_Q1['01 - Rental Details Local Start Time']]
chicago_bike_data_Q1['hour'] = [int(str(starttime).split(" ")[1].split(":")[0]) for starttime in chicago_bike_data_Q1['01 - Rental Details Local Start Time']]
chicago_bike_data_Q2['day'] = [int(str(starttime).split(" ")[0].split("-")[2]) for starttime in chicago_bike_data_Q2['start_time']]
chicago_bike_data_Q2['month'] = [int(str(starttime).split(" ")[0].split("-")[1]) for starttime in chicago_bike_data_Q2['start_time']]
chicago_bike_data_Q2['hour'] = [int(str(starttime).split(" ")[1].split(":")[0]) for starttime in chicago_bike_data_Q2['start_time']]
chicago_bike_data_Q3['day'] = [int(str(starttime).split(" ")[0].split("-")[2]) for starttime in chicago_bike_data_Q3['start_time']]
chicago_bike_data_Q3['month'] = [int(str(starttime).split(" ")[0].split("-")[1]) for starttime in chicago_bike_data_Q3['start_time']]
chicago_bike_data_Q3['hour'] = [int(str(starttime).split(" ")[1].split(":")[0]) for starttime in chicago_bike_data_Q3['start_time']]
chicago_bike_data_Q4['day'] = [int(str(starttime).split(" ")[0].split("-")[2]) for starttime in chicago_bike_data_Q4['start_time']]
chicago_bike_data_Q4['month'] = [int(str(starttime).split(" ")[0].split("-")[1]) for starttime in chicago_bike_data_Q4['start_time']]
chicago_bike_data_Q4['hour'] = [int(str(starttime).split(" ")[1].split(":")[0]) for starttime in chicago_bike_data_Q4['start_time']]
data_groupby_day_out_Q1 = pd.DataFrame(chicago_bike_data_Q1.groupby(['from_station_name', 'month','day','hour'])['trip_id'].count()).reset_index()
data_groupby_day_in_Q1 = pd.DataFrame(chicago_bike_data_Q1.groupby(['to_station_name','month','day','hour'])['trip_id'].count()).reset_index()
data_groupby_day_out_Q1= data_groupby_day_out_Q1.rename(columns={"trip_id": "Number Of Outgoing Trips",
"from_station_name":"Station Name"})
data_groupby_day_in_Q1 = data_groupby_day_in_Q1.rename(columns={"trip_id": "Number Of Incoming Trips",
"to_station_name":"Station Name"})
new_df_Q1= | pd.merge(data_groupby_day_out_Q1,data_groupby_day_in_Q1,on=['Station Name','month','day','hour'],how='outer') | pandas.merge |
import pandas as pd
import numpy as np
from utils import is_number
import settings as conf
class EFO:
UKB_MAP = pd.read_csv(conf.OMIM_SILVER_STANDARD_UKB_EFO_MAP_FILE, sep='\t')
def __init__(self, efo_file):
"""
efo_file in CSV format downloaded from: https://bioportal.bioontology.org/ontologies/EFO
Example:
efo_file = '/mnt/data/EFO.csv.gz'
efo = EFO(efo_file)
efo.efo_data.head()
clinvar_with_efo = efo.assign_efo_term(clinvar_data)
"""
self.efo_data_full = pd.read_csv(efo_file, low_memory=False)[['Class ID', 'database_cross_reference', 'Preferred Label']]
self.efo_data_full = self.efo_data_full.rename(columns={
'Class ID': 'class_id',
'database_cross_reference': 'omim_codes',
'Preferred Label': 'preferred_label',
})
self.efo_data_full = self.efo_data_full.dropna(subset=['class_id', 'preferred_label'])
self.efo_data_omim = self.efo_data_full.drop_duplicates(subset='omim_codes').dropna()
self.efo_data_omim = self.efo_data_omim.assign(omim_codes_num=self.efo_data_omim['omim_codes'].apply(self._convert_to_mim_number)).drop(columns='omim_codes')
self.efo_data_omim = self.efo_data_omim.rename(columns={'omim_codes_num': 'omim_codes'})
# unnest omim_codes column
newvalues = np.dstack((np.repeat(self.efo_data_omim['preferred_label'].values, list(map(len, self.efo_data_omim['omim_codes'].values))), np.concatenate(self.efo_data_omim['omim_codes'].values)))
self.efo_data = pd.DataFrame(data=newvalues[0], columns=['preferred_label', 'omim'])
self.efo_data['omim'] = self.efo_data['omim'].astype(int)
def _convert_to_mim_number(self, x):
tmp = [i for i in x.split('|') if i.startswith('OMIM:')]
tmp = [t.split(':')[1] for t in tmp]
tmp = [int(t) for t in tmp if is_number(t)]
return tmp
def assign_mim_from_efo(self, df, efo_column='efo_name', copy=True):
"""
Adds a new column to df with the MIM code for the EFO code.
mim_column is the name of the column containing the MIMI code.
"""
if copy:
df2 = df.copy()
else:
df2 = df
efo_to_mim = self.efo_data.set_index('preferred_label')
def _assign_mim_from_efo(efo):
if not isinstance(efo, list):
efo = [efo]
mim_terms = []
for x in efo:
if x not in efo_to_mim.index:
continue
mim_values = efo_to_mim.loc[[x]]['omim'].tolist()
mim_terms.extend(mim_values)
if len(mim_terms) == 0:
return None
# in mim mapped to several EFO terms, then take the first one
mim_term_df = pd.Series(mim_terms).value_counts()
return mim_term_df.index[0]
return df2.assign(mim_code=df2[efo_column].apply(_assign_mim_from_efo))
def assign_efo_from_mim(self, df, mim_column='DiseaseMIM', copy=True):
"""
Adds a new column to df with the EFO term name for the MIM code.
mim_column is the name of the column containing the MIMI code.
"""
if copy:
df2 = df.copy()
else:
df2 = df
mim_to_efo = self.efo_data.set_index('omim')
def _assign_efo_from_mim(mim):
if not isinstance(mim, list):
mim = [mim]
efo_terms = []
for x in mim:
if x not in mim_to_efo.index:
continue
efo_values = mim_to_efo.loc[[x]]['preferred_label'].tolist()
efo_terms.extend(efo_values)
if len(efo_terms) == 0:
return None
# in mim mapped to several EFO terms, then take the first one
efo_term_df = | pd.Series(efo_terms) | pandas.Series |
import pandas as pd
import numpy as np
import datetime as dt
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import matplotlib.pyplot as plt
import os
import logging
import json
# data directory containing the raw NMIR files
NMIR_DATA_DIR = "data/NMIR"
# savepath for training job outputs
OUTPUT_DIR = "output"
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# Import the database for AIRACs
AIRAC_DF = pd.read_csv('data/AIRAC_dates.csv')
AIRAC_DF['start_date'] = pd.to_datetime(AIRAC_DF['start_date'])
AIRAC_DF['end_date'] = pd.to_datetime(AIRAC_DF['end_date'])
# Import the delay categorization for evaluation
DELAY_CATG_DF = pd.read_csv('data/delay_categorization.csv')
# Define all possible Regulation types
REGULATION_TYPES = ['C - ATC Capacity', 'W - Weather', 'S - ATC Staffing',
'G - Aerodrome Capacity', 'I - ATC Ind Action',
'M - Airspace Management', 'O - Other', 'P - Special Event',
'T - ATC Equipment', 'V - Environmental Issues',
'E - Aerodrome Services', 'R - ATC Routeings',
'A - Accident/Incident', 'N - Ind Action non-ATC']
def format_raw_NMIR_df(df):
'''Function to format the raw NMIR dataframe and keep only the important columns.
Args:
df (DataFrame): raw NMIR dataframe
Returns:
df (DataFrame): formatted NMIR dataframe
'''
df['ACC'] = df['TVS Id'].str[0:4] #First four letters of 'TVS Id' is considered as ACC
df['Regulation Start Time'] = pd.to_datetime(df['Regulation Start Time'])
df['Date'] = df['Regulation Start Time'].dt.date
df['Datetime'] = pd.to_datetime(df['Date'])
df['Regulation Activation Date'] = pd.to_datetime(df['Regulation Activation Date'])
df = df.loc[df['ATFM Delay (min)']>0].reset_index(drop=True)
columns_to_retain = ['ACC', 'Date', 'Datetime', 'Regulation Start Time',
'Regulation End Date', 'Regulation Activation Date',
'Regulation Duration (min)', 'Regulation Cancel Status',
'Regulation Cancel Date', 'Regulation Reason Name',
'ATFM Delay (min)', 'MP Delayed Traffic']
df = df[columns_to_retain]
return df
def get_airac(date):
'''Function to get the airac cycle (1 to 13) for any date.
Args:
date (datetime object): the date for which the AIRAC cycle is required.
Returns:
airac (int): AIRAC (1 to 13) for the date.
'''
airac = AIRAC_DF.loc[(AIRAC_DF['start_date']<=date) & (AIRAC_DF['end_date']>date)]['AIRAC_cycle'].iloc[0]
return airac
def get_regulation_count(day_df):
'''Function to count the number of regulations for each type of regulation for a day.
Args:
day_df (pandas df): The dataframe from which the counts are to be made.
Returns:
reg_counts_list (list): count of each type of regulations for a day as a list
'''
reg_counts_list = []
for rt in REGULATION_TYPES:
try:
reg_count = day_df['Regulation Reason Name'].value_counts()[rt]
except KeyError:
reg_count = 0
reg_counts_list.append(reg_count)
return reg_counts_list
def build_basic_features(day_df):
'''Function to build a feature list for a day from the NMIR dataframe list of regulations for a day
Args:
day_df (pandas df): the dataframe containing the list of regulations (cut off by activation time ex: 6AM) for a day.
Returns:
features (list): the features for a day.
'''
datetime_0hrs = day_df.iloc[0]['Datetime']
count_reg_pub = day_df.shape[0]
avg_reg_dur_pub = day_df['Regulation Duration (min)'].mean()
d_op_activation_counts = day_df.loc[day_df['Regulation Activation Date']>datetime_0hrs].shape[0]
count_num_ACC_pub = len(day_df['ACC'].unique().tolist())
weekday = day_df.loc[0]['Datetime'].dayofweek
airac = get_airac(day_df.loc[0]['Datetime'])
reg_counts_list = get_regulation_count(day_df)
features = [count_reg_pub, avg_reg_dur_pub, d_op_activation_counts, count_num_ACC_pub, weekday, airac] + reg_counts_list
return features
def build_labels(day_df):
'''Function to build the labels for a day from the NMIR dataframe list of regulations for a day.
Args:
day_df (pandas df): The dataframe containing the list of regulations (cut off by dayEndHrs ex: 24 represnting end of the day) for a day.
Returns:
labels (list): the labels ['ATFM Delay (min)', 'MP Delayed Traffic'] for a day.
'''
atfm_delay = day_df['ATFM Delay (min)'].sum()
mp_delyed_traffic = day_df['MP Delayed Traffic'].sum()
labels = [atfm_delay, mp_delyed_traffic]
return labels
def transform_to_daywise_basic(raw_df, pub_cut_off_hrs=6, day_end_hrs=24, encode=False):
'''Function to transform raw NMIR dataframe into a daywise dataframe with features and labels.
Args:
raw_df (pandas df): The raw NMIR dataframe.
pubCutOffHrs (int): This number represents the hours (0 to 24) which is used to seperate a days'
list of regulations as a snapshot to build the features. This is based on the 'activation time' column.
dayEndHrs (int): This number represents the target time hour (0 to 24) which is used as target delays as labels for the day.
Returns:
daywise_df: A daywise dataframe with features and lables
'''
raw_df = raw_df.reset_index(drop=True)
raw_df = format_raw_NMIR_df(raw_df)
dates = raw_df.groupby(by ='Date',as_index=False).sum()['Date'].tolist() # get a list of all available dates in the df
daywise_rowdata_list = []
for d in dates:
day_df = raw_df.loc[raw_df['Date']==d].reset_index(drop=True)
day_begin_time = pd.to_datetime(d) # convert date to datetime to get 00:00 hrs time-stamp
reg_cut_off_time = day_begin_time + dt.timedelta(hours=pub_cut_off_hrs)
day_end_time = day_begin_time + dt.timedelta(hours=24)
# select only regulations activated from the start of the day until the cutoff time
day_df = day_df.loc[day_df['Regulation Activation Date']<=reg_cut_off_time].reset_index(drop=True)
# build features from the filtered daily regulations list
day_features = build_basic_features(day_df)
day_labels = build_labels(day_df.loc[day_df['Regulation Start Time']<=day_end_time])
daywise_rowdata_list.append([d] + day_features + day_labels)
header = ['Date', 'CountRegPub', 'AvgRegDurPub', 'DopActivationCounts','CountNumACCPub',
'WeekDay', 'AIRAC'] + REGULATION_TYPES + ['ATFM Delay (min)', 'MP Delayed Traffic']
return | pd.DataFrame(daywise_rowdata_list, columns=header) | pandas.DataFrame |
#Tools for reading and analysis of data from Schlumberger CTD Divers
from pandas import read_csv
from pandas import concat
from pandas import DataFrame
"""
Functions to read Schlumberger diver logger files.
"""
#read in the CSV file from a CTD diver and return a pandas DataFrame
def readCTD(csvfile):
"""
Reads data from a CSV or MON file exported from a Schlumberger CTD Diver.
Parameters
----------
csv_file : string
A string containing the file name of the CSV or MON file to be read.
Returns
-------
df : pandas.DataFrame
DataFrame containing data from HOBO csv file.
"""
if csvfile.endswith('MON'):
sep = '\s\s\s\s*'
else:
sep = ','
# header = 0
skiprows = 66 #this is somewhat weak, number of lines could change over time??
index_col = 0
names = ['Pressure', 'Temperature', 'Conductivity']
parse_dates = True
skip_footer = 1
df = read_csv(csvfile, sep=sep, names=names, skiprows=skiprows, index_col=index_col, parse_dates=parse_dates, skip_footer=skip_footer)
return df
#read in the CSV file from a CTD diver and return a pandas DataFrame
def readBaro(csvfile):
"""
Reads data from a CSV or MON file from a Schlumberger Baro Diver.
Parameters
----------
csv_file : string
A string containing the file name of the CSV or MON file to be read.
Returns
-------
df : pandas.DataFrame
DataFrame containing data from HOBO csv file.
"""
if csvfile.endswith('MON'):
sep = '\s\s\s\s*'
else:
sep = ','
# header = 0
skiprows = 54 #this is somewhat weak, number of lines could change over time??
index_col = 0
names = ['Pressure', 'Temperature']
parse_dates = True
skip_footer = 1
df = read_csv(csvfile, sep=sep, names=names, skiprows=skiprows, index_col=index_col, parse_dates=parse_dates, skip_footer=skip_footer)
return df
#
#
# Arguments:
# dflist = a list of dataframes
#
# zero_shift =
#
# #
#
def concatCTD(dflist, zero_shift = True, n_to_average = 5, offset_list=[], offset_dates = []):
"""
Accepts a list of CTD DataFrames and concatenates them.
Parameters
----------
dflist : list
List of pandas.DataFrames to concatenate.
zero_shift : boolean
If set to True, the pressure values will be adjusted at the time of each join, assuming that flow depth before and after the join was equal. If set to False, no adjustment will be made in pressure values. This is useful when downloading the logger may have resulted in a slightly different position in the water column. (Default = True)
n_to_average : int
Number of data points to average before and after join in order to determine data offset value for pressure
offset_list : list
List of offsets to be applied manually to pressure data.
offset_dates : list
List of datetime strings corresponding to manual offsets.
Returns
-------
(concatenated : pandas.DataFrame, offset_list : pandas.DataFrame)
A tuple is returned with the first item being a DataFrame object containing the concatenated data and the second item in the tuple being a DataFrame object containing offsets with datetimes of the offsets as an index.
"""
concatenated = None
if zero_shift == False:
#concatenate with no shifting
#note: might want to add some capability to handle overlapping data
concatenated = concat(dflist)
else:
if len(offset_list) > 0:
#offset each data file by the value in offset list
if len(offset_list) != len(dflist) - 1:
print("Number of elements in offset_list must be one less than number of data files to concatenate")
return None
else:
for i, df in enumerate(dflist):
if i != 0: #skip first data frame
df['Pressure'] = df['Pressure'] + offset_list[i-1]
else:
for i, df in enumerate(dflist):
if i != 0: #skip first data frame
#in tail/head we throw out last/first data point
#get average value from tail of previous data
tail_values = dflist[i-1]['Pressure'][-n_to_average-1:-1]
tail_average = tail_values.mean()
#get average value from head of following data
head_values = df['Pressure'][1:n_to_average+1]
head_average = head_values.mean()
delta = tail_average - head_average
offset_dates.append(df.index[0])
offset_list.append(delta)
df['Pressure'] = df['Pressure'] + delta
concatenated = | concat(dflist) | pandas.concat |
"""
Test suite for forecasting module.
"""
import pandas as pd
from forecasting import pandas_to_patients
from hospital.people import Patient
PATIENT_LIST = [
Patient(
name="John",
sex="male",
weight=70,
department="surgery",
age=37,
specialty="trauma_and_orthopaedic",
is_known_covid=False,
is_suspected_covid=False,
is_acute_surgical=True,
is_elective=False,
needs_mobility_assistence=False,
is_dementia_risk=False,
is_high_acuity=True,
is_immunosupressed=False,
is_end_of_life=False,
is_infection_control=False,
is_falls_risk=False,
needs_visual_supervision=False,
expected_length_of_stay=37,
length_of_stay=0,
),
Patient(
name="Sarah",
sex="female",
weight=70,
department="medicine",
age=76,
specialty="cardiology",
is_known_covid=True,
is_suspected_covid=False,
is_acute_surgical=False,
is_elective=True,
needs_mobility_assistence=True,
is_dementia_risk=True,
is_high_acuity=True,
is_immunosupressed=False,
is_end_of_life=False,
is_infection_control=False,
is_falls_risk=True,
needs_visual_supervision=False,
expected_length_of_stay=59,
length_of_stay=0,
),
]
PATIENT_JSON = {
"0": {
"DIM_PATIENT_ID": "John",
"SEX_DESC": "Male",
"AGE": "37",
"1-2-1": "0.0",
"COVID Positive": "0.0",
"COVID Re-Swab": "0.0",
"Dementia": "0.0",
"End Of Life": "0.0",
"Exposed to COVID": "0.0",
"Falls": "0.0",
"Learning Disabilities": "1.0",
"MH High Risk": "0.0",
"Visual Impairment": "0.0",
"Visual Supervision": "0.0",
"ELECTIVE": "0",
"ADMIT_DIV": "Surgery",
"ADMIT_SPEC": "Trauma & Orthopaedic",
"LOS_HOURS": "37",
},
"1": {
"DIM_PATIENT_ID": "Sarah",
"SEX_DESC": "Female",
"AGE": "76",
"1-2-1": "0.0",
"COVID Positive": "1.0",
"COVID Re-Swab": "0.0",
"Dementia": "1.0",
"End Of Life": "0.0",
"Exposed to COVID": "0.0",
"Falls": "1.0",
"Learning Disabilities": "0.0",
"MH High Risk": "0.0",
"Visual Impairment": "0.0",
"Visual Supervision": "0.0",
"ELECTIVE": "1",
"ADMIT_DIV": "Medicine",
"ADMIT_SPEC": "Cardiology",
"LOS_HOURS": "59",
},
}
def test_pandas_to_patients():
patient_df = | pd.DataFrame(PATIENT_JSON) | pandas.DataFrame |
# coding: utf-8
import pandas as pd
import numpy as np
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.models import HoverTool, PanTool, WheelZoomTool, BoxSelectTool, TapTool, OpenURL
from bokeh.models import GMapPlot, GMapOptions, Circle, DataRange1d, Range1d
from bokeh.io import curdoc
from bokeh.layouts import row, column, widgetbox, gridplot
from bokeh.models.widgets import Select, Slider, TextInput, DataTable, TableColumn, Div, Select
import itertools
import os
import logging
logger = logging.getLogger(__name__)
# data file locations
MAP_DATA_FILE = 'proteomz_stns.csv'
PROTEOMZ_DATA_FILE = 'ExampleDataset.csv'
TAXA_KEYS_DATA_FILE = 'Taxa_keys.csv'
# plot tools
TOOLS = "box_zoom, pan, xwheel_zoom, reset"
# global visual parameters
TITLE_TEXT_SIZE = '20px'
# introduction text
INTRODUCTION_HTML = """<b>Ocean Proteomics data from <i>Falkor ProteOMZ Expedition</i> in January-February of 2016.
The prototype interactive display can explore millions of protein values from over a hundred 300 liter samples
collected in the Central Pacific Oxygen Minimum Zone to depths greater than 1 kilometer. Use the sliders and
menus to enable scientific discovery within this novel dataset. <u>*NOTE: This is an example dataset containing <i>shuffled protein
annotations.</i> Public release of this dataset coming soon. </u> """
INTRODUCTION_WIDTH = 380
INTRODUCTION_HEIGHT = 130
# map visual parameters
MAP_WIDTH = 400
MAP_HEIGHT = 750
MAP_TITLE = 'ProteOMZ EXPEDITION FALKOR 2015'
MAP_LAT = 7.29
MAP_LON = -145.73
MAP_ZOOM = 4
# For map to work, uncomment this line and put your own google API key (https://developers.google.com/maps/documentation/javascript/get-api-key)
# MAP_API_KEY =
MAP_TYPE = 'hybrid'
DESELECTED_STATION_COLOR = 'white'
SELECTED_STATION_COLOR = 'red'
# profile visual parameters
PROFILE_TITLE = 'The Vertical Distribution of Microbial Proteins'
PROFILE_X_LABEL = 'Relative Abundance (Spectral Counts)'
PROFILE_Y_LABEL = 'Depth in the Ocean (meters)'
PROFILE_LINE_COLOR = 'red'
MAX_PROFILES = 1200
PROFILE_WIDTH = 600
PROFILE_HEIGHT = 1100
# histogram visual parameters
HISTOGRAM_TITLE = 'All Spectra/IDs'
HISTOGRAM_X_LABEL = 'Sum of Proteins/Spectra'
HISTOGRAM_WIDTH = 400
HISTOGRAM_HEIGHT = 1100
# bar chart visual parameters
TAXA_BAR_TITLE = 'The Diversity of Microbial Proteins'
TAXA_BAR_WIDTH = 600
TAXA_BAR_HEIGHT = 350
TAXA_BAR_COLORS = ["#e6ab02", "#1f78b4", "#b2182b", "#7570b3", "#e7298a", "#66a61e",
"#d95f02", "#666666"] #, "#1b9e77"]
#table settings
TAXON_TABLE_WIDTH=600
TAXON_TABLE_HEIGHT=750
# initial selections
ALL = 'ALL'
INIT_TAXA_GROUP = ALL
INIT_EC_GROUP = ALL
INIT_PCTILE = 95
INIT_NUT = 'N+N'
INIT_PROT = 'P1'
ST_SELECT_TITLE = 'Station'
NUT_SELECT_TITLE = 'Select Hydrographic Parameter for Correlation'
TN_SELECT_TITLE = 'Select Microbial Taxon'
EC_SELECT_TITLE = 'Major Enzyme Classes'
PERCENTILE_SLIDER_TITLE = 'Percentile (Note: be patient below 90%)'
EC_GROUPS = ['Oxidoreductases','Transferases', 'Hydrolases', 'Lyases', 'Isomerases', 'Ligases']
# computing axis ranges
def compute_profile_axis_ranges(z, station_counts):
# compute plot axis ranges for profile plot
max_z, min_z = z.max(), z.min()
min_c, max_c = 0, station_counts.max().max()
return (max_z, min_z), (min_c, max_c)
def compute_histogram_axis_ranges(histogram_datasource):
# compute plot axis ranges for histogram
min_h = 0
max_h = max(histogram_datasource.data['prot_cts']) * 1.5
return (min_h, max_h)
# main container
class Visualization(object):
def __init__(self):
"""read data and construct plot elements in their initial state"""
self.read_data()
z, station_counts, hydrography_counts, all_counts, selected_nut = self.select_initial_data(self.stations[0])
self.construct_datasources(z, station_counts, hydrography_counts, all_counts, selected_nut)
# create plots and widgets
self.make_plots(z, station_counts, hydrography_counts, selected_nut)
self.make_widgets()
def read_data(self):
"""read data and transform into dataframes"""
self._read_map_data()
self._read_proteomz_with_metadata()
def _read_map_data(self):
# second column data source for map stn/lat/long only, single point per stn
self.stn_coor = | pd.read_csv(MAP_DATA_FILE, index_col=None) | pandas.read_csv |
import os
import json
import datetime
import csv
import string
import gensim
from gensim import corpora
from gensim.models.coherencemodel import CoherenceModel
import nltk
from nltk.corpus import words, stopwords, wordnet
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk import pos_tag
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import collections
import numpy as np
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
#dictionary_words = dict.fromkeys(words.words(), None)
#stopWords = set(stopwords.words('english'))
#tokenizer = RegexpTokenizer(r'\w+')
#stemmer = PorterStemmer()
#lemmatiser = WordNetLemmatizer()
stop = set(stopwords.words('english'))
stop.add ('u')
stop.add ('e')
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
#topic_doc_complete = []
#lda_model = ""
class TwitterTopics:
def __init__(self, folder_path, mongoDB_database=None):
self.folder_path = folder_path
self.lda_model = object()
self.lsi_model = object()
self.doc_term_matrix = object()
self.dictionary = object()
self.lda_coh_u_mass = 0
self.lda_coh_c_v = 0
self.lsi_coh_u_mass = 0
self.lsi_coh_c_v = 0
self.db = mongoDB_database
if mongoDB_database is not None:
self.c_topics = self.db.topics
else:
self.c_topics = None
def __del__(self):
self.folder_path = None
self.lda_model = None
self.lsi_model = None
self.doc_term_matrix = None
self.dictionary = None
self.lda_coh_u_mass = None
self.lda_coh_c_v = None
self.lsi_coh_u_mass = None
self.lsi_coh_c_v = None
def get_coh_u_mass(self):
return self.lda_coh_u_mass, self.lsi_coh_u_mass
def get_coh_c_v(self):
return self.lda_coh_c_v, self.lda_coh_c_v
#create one array with all tweets of one hashtag for topic analysis
def get_docs_from_file(self, file_path):
docs = []
with open(file_path, 'r', encoding='utf8', errors='ignore') as f:
for line in f:
docs.append(line)
f.close()
return docs
#clean documents for topic analysis
def clean_docs(self, doc, delete_numbers=True, delete_stop_words=True, lemmatize_words=True):
doc_clean = doc
if delete_numbers ==True:
doc_clean = doc.replace('1', '').replace('2', '').replace('3', '').replace('4', '').replace('5', '').replace('6', '').replace('7', '').replace('8', '').replace('9', '').replace('0', '')
if delete_stop_words == True:
doc_clean = " ".join([i for i in doc_clean.lower().split() if i not in stop])
doc_clean = ''.join(ch for ch in doc_clean if ch not in exclude)
if lemmatize_words == True:
doc_clean = " ".join(lemma.lemmatize(word) for word in doc_clean.split())
return doc_clean
#train model
def train_model(self, topic_docs, num_topics, model_name, blnSaveinDB=False, blnSaveTrainedModelFiles=False, txtFileName=None,
model_type='both', lda_num_of_iterations=150, delete_stop_words=True, lemmatize_words=True, delete_numbers=True):
#starttime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#print("Executing train_model... Started at: " + starttime )
doc_clean = [self.clean_docs(doc, delete_numbers, delete_stop_words, lemmatize_words).split() for doc in topic_docs]
# Creating the term dictionary of our corpus, where every unique term is assigned an index. dictionary = corpora.Dictionary(doc_clean)
self.dictionary = corpora.Dictionary(doc_clean)
# Converting list of documents (corpus) into Document Term Matrix using dictionary prepared above.
self.doc_term_matrix = [self.dictionary.doc2bow(doc) for doc in doc_clean]
# Creating the object for LDA model using gensim library
Lda = gensim.models.ldamodel.LdaModel
file_data = []
if model_type in ('lda', 'both'):
# Build the LDA model
self.lda_model = gensim.models.LdaModel(corpus=self.doc_term_matrix, num_topics=num_topics, id2word=self.dictionary, iterations=lda_num_of_iterations)
#get LDA coherence
self.lda_coh_u_mass = CoherenceModel(model=self.lda_model, corpus=self.doc_term_matrix, dictionary=self.dictionary, coherence='u_mass')
self.lda_coh_c_v = CoherenceModel(model=self.lda_model, texts=doc_clean, dictionary=self.dictionary, coherence='c_v')
#create json file with lda results
for idx in range(num_topics):
topic = idx+1
strtopic = str(topic)
data = '{"model_name":"' + model_name + \
'", "model_type":"' + 'lda' + \
'", "timestamp":"' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + \
'", "no_tweets":"' + str(len(topic_docs)) + \
'", "coh_u_mass":"' + str(self.lda_coh_u_mass.get_coherence()) + \
'", "coh_c_v":"' + str(self.lda_coh_c_v.get_coherence()) + \
'", "topic_no":"' + strtopic + \
'", "topic":"' + str(self.lda_model.print_topic(idx, num_topics)).replace('"', "-") + '"}'
x = json.loads(data)
file_data.append(x)
if model_type in ('lsi', 'both'):
# Build the LSI model
self.lsi_model = gensim.models.LsiModel(corpus=self.doc_term_matrix, num_topics=num_topics, id2word=self.dictionary)
#get LSI coherence
self.lsi_coh_u_mass = CoherenceModel(model=self.lsi_model, corpus=self.doc_term_matrix, dictionary=self.dictionary, coherence='u_mass')
self.lsi_coh_c_v = CoherenceModel(model=self.lsi_model, texts=doc_clean, dictionary=self.dictionary, coherence='c_v')
#create json file with lsi results
for idx in range(num_topics):
topic = idx+1
strtopic = str(topic)
data = '{"model_name":"' + model_name + \
'", "model_type":"' + 'lsi' + \
'", "timestamp":"' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + \
'", "no_tweets":"' + str(len(topic_docs)) + \
'", "coh_u_mass":"' + str(self.lsi_coh_u_mass.get_coherence()) + \
'", "coh_c_v":"' + str(self.lsi_coh_c_v.get_coherence()) + \
'", "topic_no":"' + strtopic + \
'", "topic":"' + str(self.lsi_model.print_topic(idx, num_topics)).replace('"', "-") + '"}'
x = json.loads(data)
file_data.append(x)
# Save if mongoDB collection is asked
if blnSaveinDB == True:
if self.db is not None:
self.c_topics.insert_many(file_data)
else:
print("Can't save topics in db. No mongoDB connection was set up.")
# Save results in a text file
if txtFileName is not None:
with open(txtFileName, 'w', encoding="utf-8") as outfile:
json.dump(file_data, outfile)
# Save models into file
if blnSaveTrainedModelFiles == True:
#creates path if does not exists
if not os.path.exists(self.folder_path + "/trained_models/"):
os.makedirs(self.folder_path + "/trained_models/")
self.lda_model.save(self.folder_path + "/trained_models/" + model_name + "_lda_model.model")
self.dictionary.save(self.folder_path + "/trained_models/" + model_name + "_dictionary.dict")
#endtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
#print("Finished executing train_model. Ended at: " + endtime)
#train model from file
def train_model_from_file(self, file_path, num_topics, model_name, blnSaveinDB=False, blnSaveTrainedModelFiles=False, txtFileName=None,
model_type='both', lda_num_of_iterations=150, delete_stop_words=True, lemmatize_words=True, delete_numbers=True):
docs = self.get_docs_from_file(file_path)
self.train_model(docs, num_topics, model_name, blnSaveinDB, blnSaveTrainedModelFiles, txtFileName, model_type, lda_num_of_iterations, delete_stop_words, lemmatize_words, delete_numbers)
#plot graph with lda topics
def plot_topics(self, file_name, no_of_topics, model_type = 'lda', fig_size_x = 17, fig_size_y=15, replace_existing_file=True):
if replace_existing_file==True or not os.path.exists(file_name):
fig_size_y = 7*(no_of_topics/2)
fiz=plt.figure(figsize=(fig_size_x, fig_size_y))
for i in range(no_of_topics):
if model_type == 'lda':
df=pd.DataFrame(self.lda_model.show_topic(i), columns=['term','prob']).set_index('term')
elif model_type == 'lsi':
df=pd.DataFrame(self.lsi_model.show_topic(i), columns=['term','prob']).set_index('term')
no_rows = int(no_of_topics/2)+no_of_topics%2
plt.subplot(no_rows,2,i+1)
plt.title('topic '+str(i+1))
sns.barplot(x='prob', y=df.index, data=df, label='Cities', palette='Reds_d')
plt.xlabel('probability')
#save the file
plt.savefig(file_name, dpi=200, facecolor='w', edgecolor='w')
#plt.show()
plt.cla() # Clear axis
plt.clf() # Clear figure
plt.close() # Close a figure window
# read a frequency list into a pandas objects
# file format word\tfrequency
def read_freq_list_file(self, file_path, delimiter='\t'):
#df = pd.read_csv(file_path, encoding = "ISO-8859-1", header=None, sep=delimiter, lineterminator='\n')
df = | pd.read_csv(file_path, encoding = "utf-8", header=None, sep=delimiter, lineterminator='\n') | pandas.read_csv |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.