prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
import pandas as pd
import scipy.stats
from scipy import stats
from scipy.stats import f_oneway
# Input parameters: sample (pd df), population mean (float), significance level (optional float)
# Return values: p-value, t-value, confidence interval (lower), confidence interval (upper), mean of sample, reject/accept (1 = accept, 0 = reject)
def t_test_1_samp(x, pop_mean, sig_lvl=0.05):
out = 0  # set to 1 to enable verbose printing
if out:
print("One Sample t-test")
samp_mean = float(x.mean())
samp_sd = float(x.std())
n = x.shape[0]
t = float((samp_mean - pop_mean) / (samp_sd / pow(n, 0.5)))
p = scipy.stats.t.sf(abs(t), df=n - 1) * 2
con_1, con_2 = scipy.stats.t.interval(
alpha=1 - sig_lvl, df=n - 1, loc=samp_mean, scale=scipy.stats.sem(x)
)
con_1 = float(con_1)
con_2 = float(con_2)
if out:
print("t = " + str(t))
print("df = " + str(n - 1))
print("p-value = " + str(p))
accept = 1
if p <= sig_lvl:
if out:
print("Alternative hypothesis: true mean is not equal to " + str(pop_mean))
accept = 0
else:
if out:
print("Null hypothesis: true mean is equal to " + str(pop_mean))
if out:
print(
str(100 * (1 - sig_lvl))
+ "% confidence interval: "
+ str(con_1)
+ " "
+ str(con_2)
)
print("Mean of x: " + str(samp_mean))
result = {
"p_value": p,
"t_value": t,
"con_low": con_1,
"con_up": con_2,
"sample_mean_1": samp_mean,
"accept": accept,
}
return result
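# Illustrative usage sketch (not part of the original module): the sample
# values and population mean below are hypothetical.
def _demo_t_test_1_samp():
    sample = pd.DataFrame([47.1, 52.3, 49.8, 51.0, 48.6])
    res = t_test_1_samp(sample, pop_mean=50.0)
    print(res["p_value"], res["t_value"], res["con_low"], res["con_up"], res["accept"])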
# Input parameters: sample 1 (pd df), sample 2 (pd df), significance level (optional float)
# Return values: p-value, t-value, confidence interval (lower), confidence interval (upper), mean of sample 1, mean of sample 2, reject/accept (1 = accept, 0 = reject)
def t_test_welch(x, y, sig_lvl=0.05):
out = 0
if out:
print("Welch Two sample t-test (unequal variance)")
mu_1 = float(x.mean())
mu_2 = float(y.mean())
s1 = x.std()
s2 = y.std()
n1 = x.shape[0]
n2 = y.shape[0]
t, p = stats.ttest_ind(x, y, equal_var=False)
t = float(t)
p = float(p)
con_1 = float(
(mu_1 - mu_2)
- (
scipy.stats.t.ppf((1 - sig_lvl / 2), n1 + n2 - 2)
* pow(((((n1 - 1) * s1 * s1) + ((n2 - 1) * s2 * s2)) / (n1 + n2 - 2)), 0.5)
* pow((1 / n1 + 1 / n2), 0.5)
)
)
con_2 = float(
(mu_1 - mu_2)
+ (
scipy.stats.t.ppf((1 - sig_lvl / 2), n1 + n2 - 2)
* pow(((((n1 - 1) * s1 * s1) + ((n2 - 1) * s2 * s2)) / (n1 + n2 - 2)), 0.5)
* pow((1 / n1 + 1 / n2), 0.5)
)
)
if out:
print("t = " + str(t))
print("df = " + str(n1 + n2 - 2))
print("p-value = " + str(p))
accept = 1
if p <= sig_lvl:
if out:
print("Alternative hypothesis: true difference in means is not equal to 0")
accept = 0
else:
if out:
print("Null hypothesis: true difference in means is equal to 0")
if out:
print(
str(100 * (1 - sig_lvl))
+ "% confidence interval: "
+ str(con_1)
+ " "
+ str(con_2)
)
print("Mean of x and mean of y (respectively): " + str(mu_1) + ", " + str(mu_2))
print()
result = {
"p_value": p,
"t_value": t,
"con_low": con_1,
"con_up": con_2,
"sample_mean_1": mu_1,
"sample_mean_2": mu_2,
"accept": accept,
}
return result
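# Illustrative usage sketch (not part of the original module): two hypothetical
# samples with unequal spread, compared with the Welch test above.
def _demo_t_test_welch():
    a = pd.DataFrame([12.1, 14.3, 13.8, 15.0, 13.2])
    b = pd.DataFrame([10.2, 18.9, 9.7, 17.4, 11.8])
    res = t_test_welch(a, b)
    print(res["p_value"], res["sample_mean_1"], res["sample_mean_2"], res["accept"])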
# Input parameters: sample 1 (pd df), sample 2 (pd df), significance level (optional float)
# Return values: p-value, t-value, confidence interval (lower), confidence interval (upper), mean of sample 1, mean of sample 2, reject/accept (1 = accept, 0 = reject)
def t_test_2_samp_equal_var(x, y, sig_lvl=0.05):
out = 0
if out:
print("Two sample t-test (equal variance)")
mu_1 = float(x.mean())
mu_2 = float(y.mean())
s1 = x.std()
s2 = y.std()
n1 = x.shape[0]
n2 = y.shape[0]
# pooled standard deviation for the equal-variance test, consistent with the confidence interval below
t = float((mu_1 - mu_2) / (pow(((((n1 - 1) * s1 * s1) + ((n2 - 1) * s2 * s2)) / (n1 + n2 - 2)), 0.5) * pow((1 / n1 + 1 / n2), 0.5)))
p = scipy.stats.t.sf(abs(t), df=n1 + n2 - 2) * 2
con_1 = float(
(mu_1 - mu_2)
- (
scipy.stats.t.ppf((1 - sig_lvl / 2), n1 + n2 - 2)
* pow(((((n1 - 1) * s1 * s1) + ((n2 - 1) * s2 * s2)) / (n1 + n2 - 2)), 0.5)
* pow((1 / n1 + 1 / n2), 0.5)
)
)
con_2 = float(
(mu_1 - mu_2)
+ (
scipy.stats.t.ppf((1 - sig_lvl / 2), n1 + n2 - 2)
* pow(((((n1 - 1) * s1 * s1) + ((n2 - 1) * s2 * s2)) / (n1 + n2 - 2)), 0.5)
* pow((1 / n1 + 1 / n2), 0.5)
)
)
if out:
print("t = " + str(t))
print("df = " + str(n1 + n2 - 2))
print("p-value = " + str(p))
accept = 1
if p <= sig_lvl:
if out:
print("Alternative hypothesis: true difference in means is not equal to 0")
accept = 0
else:
if out:
print("Null hypothesis: true difference in means is equal to 0")
if out:
print(
str(100 * (1 - sig_lvl))
+ "% confidence interval: "
+ str(con_1)
+ " "
+ str(con_2)
)
print("Mean of x and mean of y (respectively): " + str(mu_1) + ", " + str(mu_2))
print()
result = {
"p_value": p,
"t_value": t,
"con_low": con_1,
"con_up": con_2,
"sample_mean_1": mu_1,
"sample_mean_2": mu_2,
"accept": accept,
}
return result
# Input parameters: sample (pd df), population mean (float), significance level (optional float)
# Return values: p-value, z-value, confidence interval (lower), confidence interval (upper), mean of sample, reject/accept (1 = accept, 0 = reject)
def z_test_1_samp(x, pop_mean, sig_lvl=0.05):
out = 0
if out:
print("1 sample z-test (two-tailed)")
samp_mu = float(x.mean())
pop_std = float(x.std())  # sample std used as the estimate of the population std
n = float(x.shape[0])
z = float((samp_mu - pop_mean) / (pop_std / pow(n, 0.5)))
p = scipy.stats.norm.sf(abs(z)) * 2
if out:
print("z: " + str(z))
print("p-value: " + str(p))
accept = 1
if p <= sig_lvl:
if out:
print(
"Alternative hypothesis: the sample mean and population means are NOT equal"
)
accept = 0
else:
if out:
print("Null hypothesis: the sample mean and population means are equal")
con_1 = float(
samp_mu - scipy.stats.norm.ppf(1 - sig_lvl / 2) * pop_std / pow(n, 0.5)
)
con_2 = float(
samp_mu + scipy.stats.norm.ppf(1 - sig_lvl / 2) * pop_std / pow(n, 0.5)
)
if out:
print(
str(100 * (1 - sig_lvl))
+ "% confidence interval: "
+ str(con_1)
+ " "
+ str(con_2)
)
print("Mean of x: " + str(samp_mu))
print()
result = {
"p_value": p,
"z_value": z,
"con_low": con_1,
"con_up": con_2,
"sample_mean_1": samp_mu,
"accept": accept,
}
return result
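# Illustrative usage sketch (not part of the original module): note that this
# z-test estimates the population standard deviation from the sample itself,
# so it is only meaningful for reasonably large samples; the values below are hypothetical.
def _demo_z_test_1_samp():
    sample = pd.DataFrame([98, 102, 101, 99, 100, 103, 97, 100, 101, 99, 102, 98])
    res = z_test_1_samp(sample, pop_mean=100.0)
    print(res["p_value"], res["z_value"], res["accept"])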
# Input parameters: sample 1 (pd df), sample 2 (pd df), significance level (optional float)
# Return values: p-value, z-value, confidence interval (lower), confidence interval (upper), mean of sample 1, mean of sample 2, reject/accept (1 = accept, 0 = reject)
def z_test_2_samp(x, y, sig_lvl=0.05):
out = 0
if out:
print("2 sample z-test (two tailed)")
mu_1 = float(x.mean())
mu_2 = float(y.mean())
std_1 = float(x.std())
std_2 = float(y.std())
n1 = x.shape[0]
n2 = y.shape[0]
z = (mu_1 - mu_2) / pow((std_1 ** 2 / n1 + std_2 ** 2 / n2), 0.5)
p = scipy.stats.norm.sf(abs(z)) * 2
if out:
print("z: " + str(z))
print("p-value: " + str(p))
accept = 1
if p <= sig_lvl:
if out:
print("Alternative hypothesis: the population means are NOT equal")
accept = 0
else:
if out:
print("Null hypothesis: the population means are equal")
con_1 = float(
(mu_1 - mu_2)
- (
scipy.stats.norm.ppf((1 - sig_lvl / 2))
* pow(
(
(((n1 - 1) * std_1 * std_1) + ((n2 - 1) * std_2 * std_2))
/ (n1 + n2 - 2)
),
0.5,
)
* pow((1 / n1 + 1 / n2), 0.5)
)
)
con_2 = float(
(mu_1 - mu_2)
+ (
scipy.stats.norm.ppf((1 - sig_lvl / 2))
* pow(
(
(((n1 - 1) * std_1 * std_1) + ((n2 - 1) * std_2 * std_2))
/ (n1 + n2 - 2)
),
0.5,
)
* pow((1 / n1 + 1 / n2), 0.5)
)
)
if out:
print(
str(100 * (1 - sig_lvl))
+ "% confidence interval: "
+ str(con_1)
+ " "
+ str(con_2)
)
print("Mean of x and mean of y (respectively): " + str(mu_1) + ", " + str(mu_2))
print()
result = {
"p_value": p,
"z_value": z,
"con_low": con_1,
"con_up": con_2,
"sample_mean_1": mu_1,
"sample_mean_2": mu_2,
"accept": accept,
}
return result
# Input parameters: dictionary mapping each group label (categorical) to its list of numeric values, significance level (optional float)
# Return values: p-value, f-value, variance between, var within, degrees of freedom between, df within, df total, Sum of Squares between, ss within, ss total, accept (1 = accept, 0 = reject)
def one_way_anova(dictionary, sig_lvl=0.05):
out = 0
if out:
print("One way ANOVA")
cat_val = ""
num_val = ""
if "cat_NaN_found" in dictionary:
cat_val = dictionary.pop("cat_NaN_found")
if 'num_NaN_found' in dictionary:
num_val = dictionary.pop("num_NaN_found")
sep_values = [list(value) for key, value in dictionary.items()]
f, p = f_oneway(*sep_values)
if out:
print(f, p)
unique_groups = pd.DataFrame(list(dictionary.keys()))
k = unique_groups.shape[0]
n = sum([len(value) for key, value in dictionary.items()])
df_between = k - 1
df_within = n - k
df_total = n - 1
grand_mean = sum([item for sublist in sep_values for item in sublist]) / n
total = 0
for i in range(len(sep_values)):
group_mean = 0
for j in range(len(sep_values[i])):
group_mean = group_mean + sep_values[i][j]
group_mean = group_mean / len(sep_values[i])
total = total + (grand_mean - group_mean) ** 2 * len(sep_values[i])
total2 = 0
for i in range(len(sep_values)):
gm = 0
for j in range(len(sep_values[i])):
gm = gm + sep_values[i][j]
gm = gm / len(sep_values[i])
for j in range(len(sep_values[i])):
total2 = total2 + (sep_values[i][j] - gm) ** 2
ss_between = float(total)
ss_within = float(total2)
ss_total = float(total + total2)
var_between = float(total / df_between)
var_within = float(total2 / df_within)
row_headers = ["Sum of Squares", "d.f.", "Variance", "F", "p"]
col_headers = ["Between Groups", "Within Groups", "Total"]
data = [
[
str("%.2f" % ss_between),
str("%.0f" % df_between),
str("%.2f" % var_between),
str("%.6f" % f),
str("%.6f" % p),
],
[
str("%.2f" % ss_within),
str("%.0f" % df_within),
str("%.2f" % var_within),
"--",
"--",
],
[str("%.2f" % ss_total), str("%.0f" % df_total), "--", "--", "--"],
]
if out:
print(pd.DataFrame(data, col_headers, row_headers))
accept = 1
if p <= sig_lvl:
if out:
print("Alternative hypothesis: true difference in means is not equal to 0")
accept = 0
else:
if out:
print("Null hypothesis: true difference in means is equal to 0")
if out:
print()
result = {
"p_value": p,
"f_value": f,
"var_between": var_between,
"var_within": var_within,
"df_between": df_between,
"df_within": df_within,
"df_total": df_total,
"ss_between": ss_between,
"ss_within": ss_within,
"ss_total": ss_total,
"accept": accept,
}
dictionary["cat_NaN_found"] = cat_val
dictionary["num_NaN_found"] = num_val
return result
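# Illustrative usage sketch (not part of the original module): the function
# expects a dict mapping each group label to its list of numeric values;
# the groups below are hypothetical.
def _demo_one_way_anova():
    groups = {"A": [1.0, 2.0, 3.0, 2.5], "B": [2.0, 3.5, 4.0, 3.0], "C": [5.0, 6.0, 5.5, 6.5]}
    res = one_way_anova(groups)
    print(res["f_value"], res["p_value"], res["df_between"], res["df_within"], res["accept"])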
if __name__ == "__main__":
### Testing ###
x = pd.DataFrame([1, 40, 60, 110])
y = pd.DataFrame([5, 6, 7, 8])
groups =
|
pd.DataFrame(["A", "A", "A", "A", "B", "B", "B", "B", "C", "C", "C", "C"])
|
pandas.DataFrame
|
import os
import glob
import pandas as pd
import numpy as np
import yaml
import yamlmd
import sdg
translations = sdg.translations.TranslationInputSdmx(source='https://registry.sdmx.org/ws/public/sdmxapi/rest/datastructure/IAEG-SDGs/SDG/latest/?format=sdmx-2.1&detail=full&references=children')
translations.execute()
english = translations.get_translations()['en']
sdmx_compatibility = True
path = 'SDG_Indicators_Global_BIH_oct_2020_EN.xls'
start_cols = [
'SDG target',
'SDG indicator',
'Series',
'Unit',
]
end_cols = [
'Comments',
'Sources',
'Links',
'Custodian agency',
'Link to the global metadata (1) of this indicator:',
'Link to the global metadata (2) of this indicator:',
]
# Hardcoded some details about the source data, to keep this script simple.
sheet_info = {
'SDG 1': {
'goal': 1,
'disaggregations': ['Location','Age','Reporting Type','Sex'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 2': {
'goal': 2,
'disaggregations': ['Reporting Type','Age','Sex','Type of product'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 3': {
'goal': 3,
'disaggregations': ['Reporting Type','Age','Sex','Name of non-communicable disease','Type of occupation', 'IHR Capacity'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 4': {
'goal': 4,
'disaggregations': ['Reporting Type','Education level','Quantile','Sex','Type of skill','Location'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 5': {
'goal': 5,
'disaggregations': ['Reporting Type','Age','Sex'],
'year_start': 2000,
'year_end': 2020,
},
'SDG 6': {
'goal': 6,
'disaggregations': ['Reporting Type','Location'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 7': {
'goal': 7,
'disaggregations': ['Reporting Type','Location'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 8': {
'goal': 8,
'disaggregations': ['Reporting Type','Activity','Sex','Age','Type of product'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 9': {
'goal': 9,
'disaggregations': ['Reporting Type','Mode of transportation'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 10': {
'goal': 10,
'disaggregations': ['Reporting Type','Name of international institution','Type of product'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 11': {
'goal': 11,
'disaggregations': ['Reporting Type','Location'],
'year_start': 2000,
'year_end': 2020,
},
'SDG 12': {
'goal': 12,
'disaggregations': ['Reporting Type','Type of product'],
'year_start': 2000,
'year_end': 2020,
},
'SDG 13': {
'goal': 13,
'disaggregations': ['Reporting Type'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 14': {
'goal': 14,
'disaggregations': ['Reporting Type'],
'year_start': 2000,
'year_end': 2019,
},
'SDG 15': {
'goal': 15,
'disaggregations': ['Reporting Type','Level/Status'],
'year_start': 2000,
'year_end': 2020,
},
'SDG 16': {
'goal': 16,
'disaggregations': ['Reporting Type','Sex','Age','Parliamentary committees','Name of international institution'],
'year_start': 2000,
'year_end': 2020,
},
'SDG 17': {
'goal': 17,
'disaggregations': ['Reporting Type','Type of speed','Type of product'],
'year_start': 2000,
'year_end': 2019,
},
}
things_to_translate = {}
strip_from_values = ['<', 'NaN', 'NA', 'fn', 'C', 'A', 'E', 'G', 'M', 'N', ',']
def clean_data_value(value):
if value == '-':
return pd.NA
for strip_from_value in strip_from_values:
value = value.replace(strip_from_value, '')
value = value.strip()
if value == '':
return pd.NA
# will raise ValueError if the remaining text is not numeric
float(value)
return value
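# Illustrative check (not part of the original script); the cell values below
# are hypothetical examples of the formatting handled above.
def _demo_clean_data_value():
    assert clean_data_value('-') is pd.NA
    assert clean_data_value('<5,000 C') == '5000'
    assert clean_data_value('12.3 E') == '12.3'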
def drop_these_columns():
# These columns aren't useful; the reason for each is noted below.
return [
# This only had 1 value in the source data.
'Reporting Type',
# This only had 1 value in the source data.
'Level/Status',
# These are in the metadata.
'SDG target',
'SDG indicator',
]
def convert_composite_breakdown_label(label):
return label.replace(' ', '_').replace('-', '_').lower()
def translate(group, key):
translated = key
if group in english and key in english[group]:
translated = english[group][key]
return translated
def get_column_name_changes():
changes = {
# These serve specific purposes in Open SDG.
'Unit': 'UNIT_MEASURE',
'Series': 'SERIES',
# These changes are for compatibility with SDMX.
}
sdmx_changes = {
'Sex': 'SEX',
'Age': 'AGE',
'Location': 'URBANISATION',
'Quantile': 'INCOME_WEALTH_QUANTILE',
'Education level': 'EDUCATION_LEV',
'Activity': 'ACTIVITY',
'IHR Capacity': 'COMPOSITE_BREAKDOWN',
'Mode of transportation': 'COMPOSITE_BREAKDOWN',
'Name of international institution': 'COMPOSITE_BREAKDOWN',
'Name of non-communicable disease': 'COMPOSITE_BREAKDOWN',
'Type of occupation': 'OCCUPATION',
'Type of product': 'PRODUCT',
'Type of skill': 'COMPOSITE_BREAKDOWN',
'Type of speed': 'COMPOSITE_BREAKDOWN',
'Parliamentary committees': 'COMPOSITE_BREAKDOWN',
'Reporting Type': 'Reporting Type',
'Level/Status': 'Level/Status',
}
if sdmx_compatibility:
changes.update(sdmx_changes)
for key in changes:
changed = changes[key]
if changed not in things_to_translate:
things_to_translate[changed] = {}
if changed == 'COMPOSITE_BREAKDOWN':
comp_breakdown_label = convert_composite_breakdown_label(key)
things_to_translate[changed][comp_breakdown_label] = key
else:
things_to_translate[changed][changed] = translate(changed, changed)
return changes
# run it right away
get_column_name_changes()
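# Illustrative sketch (not part of the original script): the mapping returned
# above is meant to be applied to a sheet's columns; `df_sheet` below is a
# hypothetical two-column example.
def _demo_column_rename():
    df_sheet = pd.DataFrame({'Unit': ['NUMBER'], 'Sex': ['FEMALE']})
    renamed = df_sheet.rename(columns=get_column_name_changes())
    print(list(renamed.columns))  # ['UNIT_MEASURE', 'SEX'] when sdmx_compatibility is True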
def clean_disaggregation_value(value, column=''):
if
|
pd.isna(value)
|
pandas.isna
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pandas as pd
import sqlite3
import xlrd as xl
# if this .sqlite db doesn't already exist, this will create it
# if the .sqlite db *does* already exist, this establishes the desired connection
con = sqlite3.connect("sql_sample_db.sqlite")
# create pandas dataframes from each .csv file (note: these GitHub blob links return HTML pages; use the raw file URLs, e.g. via raw.githubusercontent.com, to load the actual CSVs):
sales_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/sales_table.csv')
car_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/car_table.csv')
salesman_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/salesman_table.csv')
cust_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/cust_table.csv')
dog_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/dog_table.csv')
cat_table = pd.read_csv('https://github.com/NYUDataBootcamp/SQLBootcamp/blob/master/data/cat_table.csv')
#%%
# make a list of the tables (dataframes) and table names:
tables = [sales_table, car_table, salesman_table, cust_table, dog_table, cat_table]
table_names = ['sales_table', 'car_table', 'salesman_table', 'cust_table', 'dog_table', 'cat_table']
# drop each table name if it already exists to avoid error if you rerun this bit of code
# then add it back (or add it for the first time, if the table didn't already exist)
for i in range(len(tables)):
table_name = table_names[i]
table = tables[i]
con.execute("DROP TABLE IF EXISTS {}".format(table_name))
pd.io.sql.to_sql(table, "{}".format(table_name), con, index=False)
# Function to make it easy to run queries on this mini-database
def run(query):
results = pd.read_sql("{}".format(query), con).fillna(' ')
return results
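# Illustrative usage sketch (not part of the original notebook): any SQLite
# query works through the helper, e.g. previewing one of the tables created above.
preview = run("SELECT * FROM sales_table LIMIT 5")
print(preview)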
# create some dataframes to act as keys to clarify differences between different RDBMSs
rdbms_differences = pd.DataFrame()
# show describe options
describe_index = ['Reading a table']
describe_differences = pd.DataFrame({'SQLite' : pd.Series(['PRAGMA TABLE_INFO(table_name)'], index=describe_index),
'MySQL' : pd.Series(['DESCRIBE table_name'], index=describe_index),
'Microsoft SQL Server' : pd.Series(['SP_HELP table_name'], index=describe_index),
'Oracle' : pd.Series(['DESCRIBE table_name'], index=describe_index)})
rdbms_differences = rdbms_differences.append(describe_differences)
# show limit options
limit_df_index = ['LIMITING']
limit_differences = pd.DataFrame({'SQLite' : pd.Series(['LIMIT N'], index=limit_df_index),
'MySQL' : pd.Series(['LIMIT N'], index=limit_df_index),
'Microsoft SQL Server' : pd.Series(['SELECT TOP N column_a...'], index=limit_df_index),
'Oracle' : pd.Series(['WHERE ROWNUM <=N'], index=limit_df_index)})
rdbms_differences = rdbms_differences.append(limit_differences)
# show compatibility with joins and different DBs
join_df_index = ['JOIN or INNER JOIN', 'LEFT JOIN or LEFT OUTER JOIN', 'RIGHT JOIN or RIGHT OUTER JOIN', 'OUTER JOIN or FULL OUTER JOIN']
join_differences = pd.DataFrame({'SQLite' : pd.Series(['✓', '✓', 'not supported', 'not supported'], index=join_df_index),
'MySQL' : pd.Series(['✓', '✓', '✓', 'not supported'], index=join_df_index),
'Microsoft SQL Server' : pd.Series(['✓','✓','✓','✓'], index=join_df_index),
'Oracle' : pd.Series(['✓','✓','✓','✓'], index=join_df_index)})
rdbms_differences = rdbms_differences.append(join_differences)
# show concat options:
concat_df_index = ['Concatenating']
concat_differences = pd.DataFrame({'SQLite' : pd.Series(['||'], index=concat_df_index),
'MySQL' : pd.Series(['CONCAT(column_a, column_b)'], index=concat_df_index),
'Microsoft SQL Server' : pd.Series(['CONCAT(column_a, column_b) or +'], index=concat_df_index),
'Oracle' : pd.Series(['CONCAT(column_a, column_b) or ||'], index=concat_df_index)})
rdbms_differences = rdbms_differences.append(concat_differences)
# show options for IF and CASE WHEN statements
conditional_df_index = ['IF', 'CASE WHEN']
conditional_differences = pd.DataFrame({'SQLite' : pd.Series(['not supported', '✓'], index=conditional_df_index),
'MySQL' : pd.Series(['IF(condition, value_if_true, value_if_false)', '✓'], index=conditional_df_index),
'Microsoft SQL Server' :
|
pd.Series(['IF condition PRINT value_if_true...','✓'], index=conditional_df_index)
|
pandas.Series
|
import functools
import numpy as np
import pandas as pd
import ABONO as abono
import pickle as pk
from numpy import random
TRAIN_PATH = 'results/train/'
TEST_PATH = 'results/test/'
COLS = [ 'user','night','power_increase',
'delta', 'theta', 'alpha', 'beta', 'sum_f_hat', 'sum_f_hat_sq', 'f_hat_std', 'fonda',
'delta_x', 'theta_x', 'alpha_x', 'beta_x', 'sum_f_hat_x', 'sum_f_hat_sq_x', 'f_hat_std_x', 'fonda_x',
'delta_y', 'theta_y', 'alpha_y', 'beta_y', 'sum_f_hat_y', 'sum_f_hat_sq_y', 'f_hat_std_y', 'fonda_y',
'delta_z', 'theta_z', 'alpha_z', 'beta_z', 'sum_f_hat_z', 'sum_f_hat_sq_z', 'f_hat_std_z', 'fonda_z',
'kurtosis', 'skew', 'std', 'mean', 'sum_abs', 'sum_sq', 'moment3', 'moment4',
'kurtosis_x', 'skew_x', 'std_x', 'mean_x', 'sum_abs_x', 'sum_sq_x', 'moment3_x','moment4_x',
'kurtosis_y', 'skew_y', 'std_y', 'mean_y', 'sum_abs_y', 'sum_sq_y',
'kurtosis_z', 'skew_z', 'std_z', 'mean_z', 'sum_abs_z', 'sum_sq_z', 'moment3_z', 'moment4_z',
('eeg',[i for i in range(1000,1700)]),
('respiration_x', [i for i in range(300,390)]),
('respiration_y', range(0,150)), ('respiration_z', range(0, 400))
]
MODEL = 'xgb'
PARAMS = {}
with abono.Session() as s:
s.init_train()
s.init_model()
s.init_test()
dfs = {}
tdfs = {}
for el in COLS:
if type(el) == tuple:
dfs[el[0]] = pd.read_csv(TRAIN_PATH + el[0] + '.csv')
dfs[el[0]] = dfs[el[0]][list(map(lambda x: el[0] + '_' + str(x), el[1]))]
tdfs[el[0]] =
|
pd.read_csv(TEST_PATH + el[0] + '.csv')
|
pandas.read_csv
|
import pandas as pd
from fuzzywuzzy import process
import os
dirname = os.path.dirname(__file__)
location = dirname + '/data/raw/'
for file in os.listdir(location):
df_wiki = pd.read_csv('Path_To_Wikidata_File')
df_THE = pd.read_csv(location + file,
index_col='Unnamed: 0')
df_THE['Times_Higher_Education_World_University_ID'] = df_THE['href'].str.replace('/world-university-rankings/', '')
df = pd.merge(df_THE, df_wiki, on='Times_Higher_Education_World_University_ID', how='left')
df['fuzz_score'] = -1
df_grid = pd.read_csv('Path_To_GRID_File')
for index, row in df.iterrows():
if
|
pd.isnull(df.loc[index, 'GRID_ID'])
|
pandas.isnull
|
from __future__ import print_function
import numpy as np
import pandas as pd
from sqlalchemy.dialects import sqlite
from mendeleev import (element, get_table, get_engine, get_session,
get_attr_for_group)
from mendeleev import __version__ as version
from .tables import IonizationEnergy
def get_zeff(an, method='slater'):
'A helper function to calculate the effective nuclear charge'
e = element(an)
return e.zeff(method=method)
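# Illustrative usage sketch (not part of the original module): both Slater and
# Clementi screening methods are used by get_neutral_data() below; silicon
# (Z=14) is chosen arbitrarily here.
def _example_zeff():
    return get_zeff(14), get_zeff(14, method='clementi')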
def get_neutral_data():
'''
Get extensive set of data from multiple database tables as pandas.DataFrame
'''
elements = get_table('elements')
series = get_table('series')
groups = get_table('groups')
elements = pd.merge(elements, series, left_on='series_id', right_on='id',
how='left', suffixes=('', '_series'))
elements = pd.merge(elements, groups, left_on='group_id',
right_on='group_id', how='left',
suffixes=('', '_group'))
elements.rename(columns={'color': 'series_colors'}, inplace=True)
en_scales = ['allred-rochow', 'cottrell-sutton', 'gordy',
'martynov-batsanov', 'mulliken', 'nagle', 'sanderson']
for scale in en_scales:
elements['en_' + scale] = [element(row.symbol).electronegativity(scale=scale)
for i, row in elements.iterrows()]
for attr in ['hardness', 'softness']:
elements[attr] = [getattr(element(row.symbol), attr)()
for i, row in elements.iterrows()]
elements['mass'] = [element(row.symbol).mass_str()
for i, row in elements.iterrows()]
elements.loc[:, 'zeff_slater'] = elements.apply(
lambda x: get_zeff(x['atomic_number'], method='slater'), axis=1)
elements.loc[:, 'zeff_clementi'] = elements.apply(
lambda x: get_zeff(x['atomic_number'], method='clementi'), axis=1)
session = get_session()
engine = get_engine()
query = session.query(IonizationEnergy).\
filter(IonizationEnergy.degree == 1).\
filter(IonizationEnergy.atomic_number.in_(list(range(1, 119))))
out = pd.read_sql_query(query.statement.compile(dialect=sqlite.dialect()),
engine)
out = out[['atomic_number', 'energy']]
out.columns = ['atomic_number', 'ionization_energy']
elements =
|
pd.merge(elements, out, on='atomic_number', how='left')
|
pandas.merge
|
####################################
# author: <NAME>
# course: Python for Data Science and Machine Learning Bootcamp
# purpose: lecture notes
# description: Section 11 - Python for Data Visualization, Plotly and Cufflinks
# other: N/A
####################################
# Due to an update, I should be considering the following:
#
# 1. Possible Import Error 1: You need to install a new package. In your command
#line type and run:
# pip install chart-studio
#
# Then in jupyter make sure you import it by running the code:
# import chart_studio.plotly as py
#
# 2.Possible Colorscale Error 2: In the "Real Data US Map Choropleth", when
# you are creating the data dictionary, make sure the colorscale line
# is = 'ylorbr', not 'YIOrbr'... so like this:
# colorscale='ylorbr'
#
# 3.Possible projection Error 3: In the "World Map Choropleth", when you are
# creating the layout, ensure that your projection line
# is = {'type':'mercator'} not Mercator with a capital...so like this:
# projection={'type':'mercator'}
#conda install plotly
#pip install cufflinks # connects plotly with Pandas
# Plotly and Cufflinks
import pandas as pd
import numpy as np
#from plotly import __version__
#print(__version__)
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode,plot,iplot
# Connecting JavaScript (interactive library) to a Python script
init_notebook_mode(connected=True)
# Making offline use of cufflinks
cf.go_offline()
# DATA
df = pd.DataFrame(np.random.randn(100,4),columns = 'A B C D'.split())
df.head()
df2 =
|
pd.DataFrame({'Category':['A','B','C'],'Values':[32,43,50]})
|
pandas.DataFrame
|
import pandas as pd
from imblearn.over_sampling import RandomOverSampler, SMOTE
from sklearn import linear_model
import statsmodels.api as sm
import numpy as np
from scipy.stats import uniform, randint
from scipy import stats
from sklearn.datasets import load_breast_cancer, load_diabetes, load_wine
from sklearn.metrics import auc, accuracy_score, confusion_matrix, mean_squared_error
from sklearn.model_selection import cross_val_score, GridSearchCV, KFold, RandomizedSearchCV, train_test_split, \
StratifiedKFold, RepeatedStratifiedKFold, GroupShuffleSplit
import matplotlib.pyplot as plt
import seaborn as sns
from xgboost import XGBClassifier
from collections import Counter
from imblearn.under_sampling import RandomUnderSampler
import xgboost as xgb
import pandas as pd
import numpy as np
import os
import math
import timeit
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import *
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from imblearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score, roc_curve
from imblearn.pipeline import Pipeline as imbpipeline
# from sklearn.pipeline import Pipeline
import warnings
warnings.filterwarnings("ignore")
# df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/2018_2019.csv")
# print(df_2018_2019)
df_all = pd.read_csv("/tmp/pycharm_project_723/new data sum info surg and Hosp numeric values.csv")
# df_all = pd.read_csv("/tmp/pycharm_project_723/imputed_data_with_numerical_values.csv")
# df_all2 = pd.read_csv("/tmp/pycharm_project_723/imputed_data2.csv")
#
print(df_all.columns.tolist())
df_all = df_all.replace({'Complics':{False:0, True:1}})
df_all = df_all.replace({'Mortalty':{False:0, True:1}})
df_all = df_all.replace({'PrCVInt':{False:0, True:1}})
df_all = df_all.replace({'Mortality':{False:0, True:1}})
df_all = df_all.replace({'Mt30Stat':{'Alive':0, 'Dead':1, np.nan:2}})
# df_all = df_all.replace({'Reoperation':{'First Time':0, 'Reoperation':1}})
# df_all.rename(columns={"EF<=35%": "EF_less_equal_35"}, inplace=True)
# print (df_all['HospID_total_cardiac_surgery'].isna().sum())
# # df_all['HospID_total_cardiac_surgery'].str.strip(',').astype(float)
# # df_all["HospID_total_cardiac_surgery"] = pd.to_numeric(df_all["HospID_total_cardiac_surgery"])
# # df_all["HospID_total_cardiac_surgery"] = df_all["HospID_total_cardiac_surgery"].str.strip(', ')
# df_all["HospID_total_cardiac_surgery"] = df_all["HospID_total_cardiac_surgery"].astype(float)
# print (df_all['HospID_total_cardiac_surgery'].isna().sum())
# # df_test= df_all[
# # ['HospID','HospID_total_cardiac_surgery', 'HospID_Reop_CABG', 'HospID_total_CABG', 'surgyear',
# # 'surgid', 'surgid_total_cardiac_surgery','surgid_total_CABG', 'surgid_Reop_CABG', 'SiteID', 'Complics', 'Mortalty']].copy()
# #
# # mask = df_all['surgyear'] == 2019 and df_all['surgyear'] == 2018
# # df_2019 = df_all[mask]
# # df_2019.to_csv("2018 2019.csv")
# # print (df_all.columns.tolist())
# # print (df_all.head(10))
# # # df_all[:50].to_csv("numeric_df_after changes.csv")
# df_model_draft = df_all[
# ['HospID', 'HospID_Reop_CABG', 'HospID_total_CABG', 'surgyear','HospID_total_cardiac_surgery'
# 'surgid', 'surgid_total_cardiac_surgery','surgid_total_CABG', 'surgid_Reop_CABG', 'SiteID', 'Age',
# 'Gender', 'RaceCaucasian', 'RaceBlack', 'RaceOther', 'Ethnicity',
# 'FHCAD', 'Diabetes', 'Hypertn', 'Dyslip', 'Dialysis', 'InfEndo', 'ChrLungD', 'ImmSupp', 'PVD', 'CreatLst',
# 'PrevMI', 'Arrhythmia', 'PrCVInt', 'POCPCI', 'MedACEI', 'MedASA',
# 'MedBeta', 'MedInotr', 'MedNitIV', 'MedSter', 'NumDisV', 'HDEF', 'VDInsufA', 'VDStenA', 'VDInsufM', 'VDStenM',
# 'VDInsufT', 'VDStenT', 'Status', 'SmokingStatus', 'InsulinDiab',
# 'ModSevereLungDis', 'PreCVAorTIAorCVD', 'RenFail', 'Angina', 'UnstableAngina', 'ClassNYHGroup',
# 'ArrhythAtrFibFlutter', 'ArrhythOther', 'DualAntiPlat', 'MedHeparin', 'AntiCoag',
# 'MedAntiplateltNoASA', 'NumDisV_ordinal', 'LeftMain', 'EF_less_equal_35', 'BMI',
# 'Complics', 'Mortality', 'Reoperation']].copy()
#
# df_small = df_model_draft[df_model_draft['surgyear'].isin([2015])]
# print (df_small["HospID_total_cardiac_surgery"].unique())
# X = df_small.drop(
# ['HospID', 'SiteID', 'surgid', 'Complics', 'Mortality'], axis=1)
# y = df_small['Mortality'] # La
# print (X.isna().sum())
# print(y.isna().sum())
labels = ['TN', 'FP', 'FN', 'TP']
categories = ['Negative', 'Positive']
N_split=5
def Make_Confusion_Matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Pastel1',
title=None,
y_pred=None,
y_test=None):
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names) == cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels, group_counts, group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0], cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
# Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
# if it is a binary confusion matrix, show some more stats
if len(cf) == 2:
# Metrics for Binary Confusion Matrices
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
specificity = tn / (tn+fp)
precision = cf[1, 1] / sum(cf[:, 1])
recall = cf[1, 1] / sum(cf[1, :])
f1_score_test = f1_score(y_test, y_pred,average='macro')
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nSensitivity={:0.3f}\nF1 Score={:0.3f}\nSpecificity={:0.3f}".format(accuracy, precision, recall, f1_score_test , specificity)
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
fig = plt.figure()
sns.set(style="white")
sns.heatmap(cf, annot=box_labels, cmap=cmap, fmt='', cbar=cbar, xticklabels=categories, yticklabels=categories)
if xyplotlabels:
plt.ylabel('True Class')
plt.xlabel('Predicted Class' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title)
plt.show()
return {'F1 Score': f1_score_test, 'Accuracy': accuracy, 'Sensitivity': recall, 'Specificity': specificity}
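# Illustrative usage sketch (not part of the original script): hypothetical
# binary labels and predictions, plotted with the module-level `labels` and
# `categories` defined above.
def _demo_confusion_matrix():
    y_true = [0, 0, 1, 1, 1, 0, 1, 0]
    y_hat = [0, 1, 1, 1, 0, 0, 1, 0]
    cf = confusion_matrix(y_true, y_hat)
    return Make_Confusion_Matrix(cf, group_names=labels, categories=categories, title='Demo', y_pred=y_hat, y_test=y_true)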
def hyper_paramitize_scale_gridSearch():
counter = Counter(y)
# estimate scale_pos_weight value
estimate = counter[0] / counter[1]
print('Estimate: %.3f' % estimate)
print (counter[0])
print(counter[1])
model = XGBClassifier(objective='binary:logistic', eval_metric='logloss')
random = RandomUnderSampler(sampling_strategy=0.33)
# define grid
# weights = [1,3, 10, 25,30, 50, 75, 99, 100]
#param_grid = dict(scale_pos_weight=weights)
#param_grid= {'xgbclassifier__scale_pos_weight': weights}
learning_rates = [0.1, 0.05, 0.01]
max_depths = [1,2,3,5,8,10]
param_grid = {'xgbclassifier__max_depth': max_depths,
'xgbclassifier__learning_rate': learning_rates}
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=2, random_state=1)
# define grid search
pipeline = Pipeline([('under',random ), ('xgbclassifier', model)])
grid = GridSearchCV(estimator=pipeline, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='roc_auc')
# execute the grid search
grid_result = grid.fit(X, y)
# report the best configuration
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
# report all configurations
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# hyper_paramitize_scale_gridSearch()
def smote():
pipeline = imbpipeline(steps=[['smote', RandomOverSampler()],
['classifier', XGBClassifier(objective='binary:logistic', eval_metric='logloss')]])
stratified_kfold = StratifiedKFold(n_splits=10,
shuffle=True,
random_state=11)
max_depths = [2 ** x for x in range(1, 7)]
num_estimators = [10, 20, 30] + list(range(45, 100, 5))
learning_rates = [0.1, 0.05, 0.01]
param_grid = {'classifier__max_depth': max_depths,
'classifier__n_estimators': num_estimators,
'classifier__learning_rate': learning_rates}
grid_search = GridSearchCV(estimator=pipeline,
param_grid=param_grid,
scoring='roc_auc',
cv=stratified_kfold,
n_jobs=-1)
grid_search.fit(X, y)
print(grid_search)
print(grid_search.best_estimator_)
print(grid_search.best_params_)
cv_score = grid_search.best_score_
test_score = grid_search.score(X, y)
print(f'Cross-validation score: {cv_score}\nTest score: {test_score}')
# smote()
def hyperparameterCVoptimization():
# parameters = {
# "learning_rate": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30],
# "max_depth": [3, 4, 5, 6, 8, 10, 12, 15],
# "gamma": [0.0, 0.1, 0.2, 0.3, 0.4],
# "colsample_bytree": [0.3, 0.4, 0.5, 0.7]
# }
X = df_small.drop(
['HospID', 'SiteID', 'surgid', 'Complics', 'Mortality'], axis=1)
y = df_small['Mortality'] # La
params = {
'max_depth': range(2, 10, 1),
# 'n_estimators': range(60, 220, 40),
# 'learning_rate': [0.1, 0.01, 0.05]
}
i=1
kf = StratifiedKFold(n_splits=5,random_state=1,shuffle=True)
for train_index,test_index in kf.split(np.zeros(len(y)), y):
print('\n{} of kfold {}'.format(i,kf.n_splits))
xtr,xvl = X.iloc[train_index],X.iloc[test_index]
ytr,yvl = y.iloc[train_index],y.iloc[test_index]
model = GridSearchCV(XGBClassifier(objective='binary:logistic', eval_metric='logloss'), param_grid=params, cv=5, scoring= 'roc_auc')
model.fit(xtr, ytr)
print (model.best_params_)
pred=model.predict(xvl)
print('accuracy_score',accuracy_score(yvl,pred))
print (classification_report(yvl,pred))
y_pred = model.predict_proba(xvl)[:, 1]
print ('roc-auc',roc_auc_score(yvl, y_pred) )
i+=1
print ("==========================================================")
# hyperparameterCVoptimization()
def cvsmote():
X = df_small.drop(
['HospID', 'SiteID', 'surgid', 'Complics', 'Mortality'], axis=1)
y = df_small['Mortality']
steps = [('over', SMOTE()), ('model', XGBClassifier(objective='binary:logistic', eval_metric='logloss'))]
pipeline = Pipeline(steps=steps)
# evaluate pipeline
for scoring in ["accuracy", "roc_auc"]:
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_val_score(pipeline, X, y, scoring=scoring, cv=cv, n_jobs=-1)
print("Model", scoring, " mean=", scores.mean(), "stddev=", scores.std())
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
def test_df(df):
for col in df:
print ( df['col1'].equals(df['col2']))
# Driver Code
def check_split_differnce():
df_train_hosp =
|
pd.DataFrame()
|
pandas.DataFrame
|
import logging
import bt
import pandas as pd
import numpy as np
class StrategyBase(object):
def __init__(self, name):
super().__init__()
self.name_ = name
self.stock_ids_ = []
def initialize(self, context, stock_ids):
self.stock_ids_ = stock_ids
class SelectWithBuySellData(bt.Algo):
def __init__(self, buy_data, sell_data):
self.buy_data_ = buy_data
self.sell_data_ = sell_data
def __call__(self, target):
d =
|
pd.to_datetime(target.now)
|
pandas.to_datetime
|
# **Authors: <EMAIL>**
# Additional packages
import os
import math
import numbers
import pandas as pd
import opendssdirect as dss
import numpy as np
from PyDSS.pyPostprocessor.pyPostprocessAbstract import AbstractPostprocess
# from PyDSS.exceptions import InvalidParameter, OpenDssConvergenceError
from PyDSS.utils.timing_utils import timed_info
# to get xfmr information
def get_transformer_info():
"""
Gather transformer information
"""
xfmr_name = dss.Transformers.Name()
xfmr_data_dict = {
"name": xfmr_name,
"num_phases": dss.Properties.Value("Phases"),
"num_wdgs": dss.Transformers.NumWindings(),
"kva": [],
"conn": [],
"kv": [],
}
for wdgs in range(xfmr_data_dict["num_wdgs"]):
dss.Transformers.Wdg(wdgs + 1)
xfmr_data_dict["kva"].append(float(dss.Properties.Value("kva")))
xfmr_data_dict["kv"].append(float(dss.Properties.Value("kv")))
xfmr_data_dict["conn"].append(dss.Properties.Value("conn"))
return xfmr_data_dict
def get_g(r_value):
"""
Get conductance values from resistance values
"""
return float(str(r_value[0]).split("|")[0]) ** (-1)
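# Illustrative check (not part of the original module): the helper inverts the
# first self-resistance term of an rmatrix cell, whether the cell arrives as a
# list of floats or as a "|"-separated string (both inputs below are hypothetical).
def _demo_get_g():
    assert get_g([0.5, 0.1, 0.5]) == 2.0
    assert get_g(["0.5 | 0.1 0.5"]) == 2.0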
#
@timed_info
def compute_electric_distance(bus_phases=None):
"""
This method computes electric distance matrix
"""
lines = dss.utils.lines_to_dataframe()
column_list = [c.strip().lower() for c in lines.columns]
lines.columns = column_list
lines["phases"] = pd.to_numeric(lines["phases"])
if bus_phases is not None:
lines = lines.loc[
lines["phases"] == bus_phases, ["bus1", "bus2", "rmatrix"]
].copy()
lines["g"] = lines["rmatrix"].apply(get_g)
busall = np.unique((list(lines["bus1"]) + list(lines["bus2"])))
disGmat_df =
|
pd.DataFrame(0, index=busall, columns=busall)
|
pandas.DataFrame
|
import importlib
import numpy as np
from torch.utils.data import Dataset, DataLoader
import random
class DataSource(Dataset):
def __init__(self, data_frame, configuration):
"""
Create a lightwood datasource from the data frame
:param data_frame:
:param configuration
"""
self.data_frame = data_frame
self.configuration = configuration
self.encoders = {}
self.transformer = None
self.training = False # Flip this flag if you are using the datasource while training
self._clear_cache()
def _clear_cache(self):
self.list_cache = {}
self.encoded_cache = {}
self.transformed_cache = None
self.decoded_cache = {}
def extractRandomSubset(self, percentage):
msk = np.random.rand(len(self.data_frame)) < (1-percentage)
test_df = self.data_frame[~msk]
self.data_frame = self.data_frame[msk]
# clear caches
self._clear_cache()
ds = DataSource(test_df, self.configuration)
ds.encoders = self.encoders
ds.transformer = self.transformer
return ds
def __len__(self):
"""
return the length of the datasource (as in number of rows)
:return: number of rows
"""
return int(self.data_frame.shape[0])
def __getitem__(self, idx):
"""
:param idx:
:return:
"""
sample = {}
dropout_features = None
if self.training == True and random.randint(0,2) == 1:
dropout_features = [feature['name'] for feature in self.configuration['input_features'] if random.randint(0,10) >= 8]
if self.transformed_cache is None:
self.transformed_cache = [None] * self.__len__()
if dropout_features is None:
cached_sample = self.transformed_cache[idx]
if cached_sample is not None:
return cached_sample
for feature_set in ['input_features', 'output_features']:
sample[feature_set] = {}
for feature in self.configuration[feature_set]:
col_name = feature['name']
col_config = self.get_column_config(col_name)
if col_name not in self.encoded_cache: # if data is not encoded yet, encode values
self.get_encoded_column_data(col_name, feature_set)
# if we are dropping this feature, get the encoded value of None
if dropout_features is not None and col_name in dropout_features:
custom_data = {col_name: [None]}
# if the dropout feature depends on another column, also pass a None array as the dependent column
if 'depends_on_column' in col_config:
custom_data[col_config['depends_on_column']] = [None]
sample[feature_set][col_name] = self.get_encoded_column_data(col_name, feature_set, custom_data=custom_data)
else:
sample[feature_set][col_name] = self.encoded_cache[col_name][idx]
if self.transformer:
sample = self.transformer.transform(sample)
# only cache if no dropout features
if dropout_features is None:
self.transformed_cache[idx] = sample
return self.transformed_cache[idx]
else:
return sample
def get_column_original_data(self, column_name):
"""
:param column_name:
:return:
"""
if column_name in self.list_cache:
return self.list_cache[column_name]
if column_name in self.data_frame:
self.list_cache[column_name] = self.data_frame[column_name].tolist()
return self.list_cache[column_name]
else: # if column not in dataframe
rows = self.data_frame.shape[0]
return [None] * rows
def get_encoded_column_data(self, column_name, feature_set = 'input_features', custom_data = None):
"""
:param column_name:
:return:
"""
if column_name in self.encoded_cache:
return self.encoded_cache[column_name]
# first argument of encoder is the data, so we either pass the custom data or we get the column data
if custom_data is not None:
args = [custom_data[column_name]]
else:
args = [self.get_column_original_data(column_name)]
config = self.get_column_config(column_name)
# see if the feature has dependencies in other columns
if 'depends_on_column' in config:
if custom_data is not None:
arg2 = custom_data[config['depends_on_column']]
else:
arg2 = self.get_column_original_data(config['depends_on_column'])
args += [arg2]
if column_name in self.encoders:
self.encoded_cache[column_name] = self.encoders[column_name].encode(*args)
return self.encoded_cache[column_name]
if 'encoder_class' not in config:
path = 'lightwood.encoders.{type}'.format(type=config['type'])
module = importlib.import_module(path)
if hasattr(module, 'default'):
encoder_class = importlib.import_module(path).default
else:
raise ValueError('No default encoder for {type}'.format(type=config['type']))
else:
encoder_class = config['encoder_class']
encoder_attrs = config['encoder_attrs'] if 'encoder_attrs' in config else {}
encoder_instance = encoder_class()
for attr in encoder_attrs:
if hasattr(encoder_instance, attr):
setattr(encoder_instance, attr, encoder_attrs[attr])
self.encoders[column_name] = encoder_instance
self.encoded_cache[column_name] = encoder_instance.encode(*args)
return self.encoded_cache[column_name]
def get_decoded_column_data(self, column_name, encoded_data, decoder_instance=None, cache=True):
"""
:param column_name: column names to be decoded
:param encoded_data: encoded data of tensor type
:return decoded_cache : Dict :Decoded data of input column
"""
if decoder_instance is None:
if column_name not in self.encoders:
raise ValueError(
'Data must have been encoded at some point; you cannot decode a column before it has been encoded at least once')
decoder_instance = self.encoders[column_name]
decoded_data = decoder_instance.decode(encoded_data)
if cache == True:
self.decoded_cache[column_name] = decoded_data
return decoded_data
def get_feature_names(self, where = 'input_features'):
return [feature['name'] for feature in self.configuration[where]]
def get_column_config(self, column_name):
"""
Get the config info for the feature given a configuration as defined in data_schemas definition.py
:param column_name:
:return:
"""
for feature_set in ['input_features', 'output_features']:
for feature in self.configuration[feature_set]:
if feature['name'] == column_name:
return feature
if __name__ == "__main__":
import random
import pandas
config = {
'name': 'test',
'input_features': [
{
'name': 'x',
'type': 'numeric',
'encoder_path': 'lightwood.encoders.numeric.numeric'
},
{
'name': 'y',
'type': 'numeric',
# 'encoder_path': 'lightwood.encoders.numeric.numeric'
}
],
'output_features': [
{
'name': 'z',
'type': 'categorical',
# 'encoder_path': 'lightwood.encoders.categorical.categorical'
}
]
}
data = {'x': [i for i in range(10)], 'y': [random.randint(i, i + 20) for i in range(10)]}
nums = [data['x'][i] * data['y'][i] for i in range(10)]
data['z'] = ['low' if i < 50 else 'high' for i in nums]
data_frame =
|
pandas.DataFrame(data)
|
pandas.DataFrame
|
#!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import logging
import os
import time
from time import gmtime, strftime
from sklearn.model_selection import train_test_split, LeaveOneOut
from .parameter_optimization import random_search_parameters
import WORC.addexceptions as ae
from WORC.classification.regressors import regressors
import glob
import random
import json
from copy import copy
from sklearn.metrics import f1_score, roc_auc_score
def random_split_cross_validation(image_features, feature_labels, classes,
patient_ids,
n_iterations, param_grid, config,
modus, test_size, start=0, save_data=None,
tempsave=False, tempfolder=None,
fixedsplits=None,
fixed_seed=False, use_fastr=None,
fastr_plugin=None,
do_test_RS_Ensemble=False):
"""Cross-validation in which data is randomly split in each iteration.
Due to options of doing single-label and multi-label classification,
stratified splitting, and regression, we use a manual loop instead
of the default scikit-learn object.
Parameters
------------
Returns
------------
"""
print('Starting random-split cross-validation.')
logging.debug('Starting random-split cross-validation.')
if save_data is None:
# Start from zero, thus empty list of previous data
save_data = list()
# If we are using fixed splits, set the n_iterations to the number of splits
if fixedsplits is not None:
n_iterations = int(fixedsplits.columns.shape[0] / 2)
print(f'Fixedsplits detected, adjusting n_iterations to {n_iterations}')
for i in range(start, n_iterations):
print(('Cross-validation iteration {} / {} .').format(str(i + 1), str(n_iterations)))
logging.debug(('Cross-validation iteration {} / {} .').format(str(i + 1), str(n_iterations)))
timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
print(f'\t Time: {timestamp}.')
logging.debug(f'\t Time: {timestamp}.')
if fixed_seed:
random_seed = i**2
else:
random_seed = np.random.randint(5000)
t = time.time()
# Split into test and training set, where the percentage of each
# label is maintained
if any(clf in regressors for clf in param_grid['classifiers']):
# We cannot do a stratified shuffle split with regression
classes_temp = classes
stratify = None
else:
if modus == 'singlelabel':
classes_temp = stratify = classes.ravel()
elif modus == 'multilabel':
# Create a stratification object from the labels
# Label = 0 means no label equals one
# Other label numbers refer to the label name that is 1
stratify = list()
for pnum in range(0, len(classes[0])):
plabel = 0
for lnum, slabel in enumerate(classes):
if slabel[pnum] == 1:
plabel = lnum + 1
stratify.append(plabel)
# Sklearn multiclass requires rows to be objects/patients
classes_temp = np.zeros((classes.shape[1], classes.shape[0]))
for n_patient in range(0, classes.shape[1]):
for n_label in range(0, classes.shape[0]):
classes_temp[n_patient, n_label] = classes[n_label, n_patient]
else:
raise ae.WORCKeyError('{} is not a valid modus!'.format(modus))
if fixedsplits is None:
# Use Random Split. Split per patient, not per sample
unique_patient_ids, unique_indices =\
np.unique(np.asarray(patient_ids), return_index=True)
if any(clf in regressors for clf in param_grid['classifiers']):
unique_stratify = None
else:
unique_stratify = [stratify[i] for i in unique_indices]
try:
unique_PID_train, indices_PID_test\
= train_test_split(unique_patient_ids,
test_size=test_size,
random_state=random_seed,
stratify=unique_stratify)
except ValueError as e:
e = str(e) + ' Increase the size of your validation set.'
raise ae.WORCValueError(e)
# Check for all ids if they are in test or training
indices_train = list()
indices_test = list()
patient_ID_train = list()
patient_ID_test = list()
for num, pid in enumerate(patient_ids):
if pid in unique_PID_train:
indices_train.append(num)
# Make sure we get a unique ID
if pid in patient_ID_train:
n = 1
while str(pid + '_' + str(n)) in patient_ID_train:
n += 1
pid = str(pid + '_' + str(n))
patient_ID_train.append(pid)
else:
indices_test.append(num)
# Make sure we get a unique ID
if pid in patient_ID_test:
n = 1
while str(pid + '_' + str(n)) in patient_ID_test:
n += 1
pid = str(pid + '_' + str(n))
patient_ID_test.append(pid)
# Split features and labels accordingly
X_train = [image_features[i] for i in indices_train]
X_test = [image_features[i] for i in indices_test]
if modus == 'singlelabel':
Y_train = classes_temp[indices_train]
Y_test = classes_temp[indices_test]
elif modus == 'multilabel':
Y_train = classes_temp[indices_train, :]
Y_test = classes_temp[indices_test, :]
else:
raise ae.WORCKeyError('{} is not a valid modus!'.format(modus))
else:
# Use pre defined splits
train = fixedsplits[str(i) + '_train'].dropna().values
test = fixedsplits[str(i) + '_test'].dropna().values
# Convert the numbers to the correct indices
ind_train = list()
for j in train:
success = False
for num, p in enumerate(patient_ids):
if j == p:
ind_train.append(num)
success = True
if not success:
raise ae.WORCIOError("Patient " + str(j).zfill(3) + " is not included!")
ind_test = list()
for j in test:
success = False
for num, p in enumerate(patient_ids):
if j == p:
ind_test.append(num)
success = True
if not success:
raise ae.WORCIOError("Patient " + str(j).zfill(3) + " is not included!")
X_train = [image_features[i] for i in ind_train]
X_test = [image_features[i] for i in ind_test]
patient_ID_train = patient_ids[ind_train]
patient_ID_test = patient_ids[ind_test]
if modus == 'singlelabel':
Y_train = classes_temp[ind_train]
Y_test = classes_temp[ind_test]
elif modus == 'multilabel':
Y_train = classes_temp[ind_train, :]
Y_test = classes_temp[ind_test, :]
else:
raise ae.WORCKeyError('{} is not a valid modus!'.format(modus))
# Find best hyperparameters and construct classifier
config['HyperOptimization']['use_fastr'] = use_fastr
config['HyperOptimization']['fastr_plugin'] = fastr_plugin
n_cores = config['General']['Joblib_ncores']
trained_classifier = random_search_parameters(features=X_train,
labels=Y_train,
param_grid=param_grid,
n_cores=n_cores,
random_seed=random_seed,
**config['HyperOptimization'])
# We only want to save the feature values and one label array
X_train = [x[0] for x in X_train]
X_test = [x[0] for x in X_test]
temp_save_data = (trained_classifier, X_train, X_test, Y_train,
Y_test, patient_ID_train, patient_ID_test, random_seed)
save_data.append(temp_save_data)
# Test performance for various RS and ensemble sizes
if do_test_RS_Ensemble:
output_json = os.path.join(tempfolder, f'performance_RS_Ens_crossval_{i}.json')
test_RS_Ensemble(estimator_input=trained_classifier,
X_train=X_train, Y_train=Y_train,
X_test=X_test, Y_test=Y_test,
feature_labels=feature_labels,
output_json=output_json)
# Save memory
delattr(trained_classifier, 'fitted_workflows')
trained_classifier.fitted_workflows = list()
# Create a temporary save
if tempsave:
panda_labels = ['trained_classifier', 'X_train', 'X_test',
'Y_train', 'Y_test',
'config', 'patient_ID_train', 'patient_ID_test',
'random_seed', 'feature_labels']
panda_data_temp =\
pd.Series([trained_classifier, X_train, X_test, Y_train,
Y_test, config, patient_ID_train,
patient_ID_test, random_seed, feature_labels],
index=panda_labels,
name='Constructed crossvalidation')
panda_data = pd.DataFrame(panda_data_temp)
n = 0
filename = os.path.join(tempfolder, 'tempsave_' + str(i) + '.hdf5')
while os.path.exists(filename):
n += 1
filename = os.path.join(tempfolder, 'tempsave_' + str(i + n) + '.hdf5')
panda_data.to_hdf(filename, 'EstimatorData')
del panda_data, panda_data_temp
# Print elapsed time
elapsed = int((time.time() - t) / 60.0)
print(f'\t Fitting took {elapsed} minutes.')
logging.debug(f'\t Fitting took {elapsed} minutes.')
return save_data
def LOO_cross_validation(image_features, feature_labels, classes, patient_ids,
param_grid, config,
modus, test_size, start=0, save_data=None,
tempsave=False, tempfolder=None, fixedsplits=None,
fixed_seed=False, use_fastr=None,
fastr_plugin=None):
"""Cross-validation in which each sample is once used as the test set.
Mostly based on the default sklearn object.
Parameters
------------
Returns
------------
"""
print('Starting leave-one-out cross-validation.')
logging.debug('Starting leave-one-out cross-validation.')
cv = LeaveOneOut()
n_splits = cv.get_n_splits(image_features)
if save_data is None:
# Start from zero, thus empty list of previous data
save_data = list()
for i, (indices_train, indices_test) in enumerate(cv.split(image_features)):
if i < start:
continue
print(('Cross-validation iteration {} / {} .').format(str(i + 1), str(n_splits)))
logging.debug(('Cross-validation iteration {} / {} .').format(str(i + 1), str(n_splits)))
timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
print(f'\t Time: {timestamp}.')
logging.debug(f'\t Time: {timestamp}.')
if fixed_seed:
random_seed = i**2
else:
random_seed = np.random.randint(5000)
t = time.time()
# Split features and labels accordingly
X_train = [image_features[j] for j in indices_train]
X_test = [image_features[j] for j in indices_test]
patient_ID_train = [patient_ids[j] for j in indices_train]
patient_ID_test = [patient_ids[j] for j in indices_test]
if modus == 'singlelabel':
# Simply use the given class labels
classes_temp = classes.ravel()
# Split in training and testing
Y_train = classes_temp[indices_train]
Y_test = classes_temp[indices_test]
elif modus == 'multilabel':
# Sklearn multiclass requires rows to be objects/patients
classes_temp = np.zeros((classes.shape[1], classes.shape[0]))
for n_patient in range(0, classes.shape[1]):
for n_label in range(0, classes.shape[0]):
classes_temp[n_patient, n_label] = classes[n_label, n_patient]
# Split in training and testing
Y_train = classes_temp[indices_train, :]
Y_test = classes_temp[indices_test, :]
else:
raise ae.WORCKeyError('{} is not a valid modus!'.format(modus))
# Find best hyperparameters and construct classifier
config['HyperOptimization']['use_fastr'] = use_fastr
config['HyperOptimization']['fastr_plugin'] = fastr_plugin
n_cores = config['General']['Joblib_ncores']
trained_classifier = random_search_parameters(features=X_train,
labels=Y_train,
param_grid=param_grid,
n_cores=n_cores,
random_seed=random_seed,
**config['HyperOptimization'])
# We only want to save the feature values and one label array
X_train = [x[0] for x in X_train]
X_test = [x[0] for x in X_test]
temp_save_data = (trained_classifier, X_train, X_test, Y_train,
Y_test, patient_ID_train, patient_ID_test, random_seed)
save_data.append(temp_save_data)
# Create a temporary save
if tempsave:
panda_labels = ['trained_classifier', 'X_train', 'X_test',
'Y_train', 'Y_test',
'config', 'patient_ID_train', 'patient_ID_test',
'random_seed', 'feature_labels']
panda_data_temp =\
pd.Series([trained_classifier, X_train, X_test, Y_train,
Y_test, config, patient_ID_train,
patient_ID_test, random_seed, feature_labels],
index=panda_labels,
name='Constructed crossvalidation')
panda_data = pd.DataFrame(panda_data_temp)
n = 0
filename = os.path.join(tempfolder, 'tempsave_' + str(i) + '.hdf5')
while os.path.exists(filename):
n += 1
filename = os.path.join(tempfolder, 'tempsave_' + str(i + n) + '.hdf5')
panda_data.to_hdf(filename, 'EstimatorData')
del panda_data, panda_data_temp
# Print elapsed time
elapsed = int((time.time() - t) / 60.0)
print(f'\t Fitting took {elapsed} minutes.')
logging.debug(f'\t Fitting took {elapsed} minutes.')
return save_data
def crossval(config, label_data, image_features,
param_grid=None, use_fastr=False,
fastr_plugin=None, tempsave=False,
fixedsplits=None, ensemble={'Use': False}, outputfolder=None,
modus='singlelabel'):
"""Constructs multiple individual classifiers based on the label settings.
Parameters
----------
config: dict, mandatory
Dictionary with config settings. See the Github Wiki for the
available fields and formatting.
label_data: dict, mandatory
Should contain the following:
patient_ids (list): ids of the patients, used to keep track of test and
training sets, and label data
label (list): List of lists, where each list contains the
label status for that patient for each
label
label_name (list): Contains the different names that are stored
in the label object
image_features: numpy array, mandatory
Consists of a tuple of two lists for each patient:
(feature_values, feature_labels)
param_grid: dictionary, optional
        Contains the parameters and their values which are used in the
        grid or randomized search hyperparameter optimization. See the
construct_classifier function for some examples.
use_fastr: boolean, default False
If False, parallel execution through Joblib is used for fast
execution of the hyperparameter optimization. Especially suited
        for execution on multicore (H)PCs. The settings used are
specified in the config.ini file in the IOparser folder, which you
can adjust to your system.
If True, fastr is used to split the hyperparameter optimization in
separate jobs. Parameters for the splitting can be specified in the
config file. Especially suited for clusters.
fastr_plugin: string, default None
Determines which plugin is used for fastr executions.
When None, uses the default plugin from the fastr config.
tempsave: boolean, default False
If True, create a .hdf5 file after each Cross-validation containing
        the classifier and results from that split. This is written to
        the GSOut folder in your fastr output mount. If False, only
        the combined result of all cross-validations is saved to a .hdf5
        file; that combined file is written in both cases.
fixedsplits: string, optional
By default, random split Cross-validation is used to train and
evaluate the machine learning methods. Optionally, you can provide
a .xlsx file containing fixed splits to be used. See the Github Wiki
for the format.
ensemble: dictionary, optional
Contains the configuration for constructing an ensemble.
modus: string, default 'singlelabel'
Determine whether one-vs-all classification (or regression) for
each single label is used ('singlelabel') or if multilabel
classification is performed ('multilabel').
Returns
----------
panda_data: pandas dataframe
Contains all information on the trained classifier.
"""
# Process input data
patient_ids = label_data['patient_IDs']
label_value = label_data['label']
label_name = label_data['label_name']
if outputfolder is None:
outputfolder = os.getcwd()
logfilename = os.path.join(outputfolder, 'classifier.log')
print("Logging to file " + str(logfilename))
# Cross-validation iteration to start with
start = 0
save_data = list()
if tempsave:
tempfolder = os.path.join(outputfolder, 'tempsave')
if not os.path.exists(tempfolder):
# No previous tempsaves
os.makedirs(tempfolder)
else:
            # Previous tempsaves, start where we left off
tempsaves = glob.glob(os.path.join(tempfolder, 'tempsave_*.hdf5'))
start = len(tempsaves)
# Load previous tempsaves and add to save data
tempsaves.sort()
for t in tempsaves:
t = pd.read_hdf(t)
t = t['Constructed crossvalidation']
temp_save_data = (t.trained_classifier, t.X_train, t.X_test,
t.Y_train, t.Y_test, t.patient_ID_train,
t.patient_ID_test, t.random_seed)
save_data.append(temp_save_data)
else:
tempfolder = None
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename=logfilename, level=logging.DEBUG)
crossval_type = config['CrossValidation']['Type']
n_iterations = config['CrossValidation']['N_iterations']
test_size = config['CrossValidation']['test_size']
fixed_seed = config['CrossValidation']['fixed_seed']
classifier_labelss = dict()
logging.debug('Starting fitting of estimators.')
    # We only need one label instance, assuming they are all the same
feature_labels = image_features[0][1]
# Check if we need to use fixedsplits:
if fixedsplits is not None and '.csv' in fixedsplits:
fixedsplits = pd.read_csv(fixedsplits, header=0)
    # Fixed splits require random-split cross-validation; they make no sense for LOO
if crossval_type == 'LOO':
        print('[WORC WARNING] Fixed splits require random-split cross-validation; switching from LOO to random_split.')
crossval_type = 'random_split'
if modus == 'singlelabel':
print('Performing single-class classification.')
logging.debug('Performing single-class classification.')
elif modus == 'multilabel':
print('Performing multi-label classification.')
        logging.debug('Performing multi-label classification.')
label_value = [label_value]
label_name = [label_name]
else:
m = ('{} is not a valid modus!').format(modus)
logging.debug(m)
raise ae.WORCKeyError(m)
for i_class, i_name in zip(label_value, label_name):
if not tempsave:
save_data = list()
if crossval_type == 'random_split':
print('Performing random-split cross-validations.')
logging.debug('Performing random-split cross-validations.')
save_data =\
random_split_cross_validation(image_features=image_features,
feature_labels=feature_labels,
classes=i_class,
patient_ids=patient_ids,
n_iterations=n_iterations,
param_grid=param_grid,
config=config,
modus=modus,
test_size=test_size,
start=start,
save_data=save_data,
tempsave=tempsave,
tempfolder=tempfolder,
fixedsplits=fixedsplits,
fixed_seed=fixed_seed,
use_fastr=use_fastr,
fastr_plugin=fastr_plugin)
elif crossval_type == 'LOO':
print('Performing leave-one-out cross-validations.')
logging.debug('Performing leave-one-out cross-validations.')
save_data =\
LOO_cross_validation(image_features=image_features,
feature_labels=feature_labels,
classes=i_class,
patient_ids=patient_ids,
param_grid=param_grid,
config=config,
modus=modus,
test_size=test_size,
start=start,
save_data=save_data,
tempsave=tempsave,
tempfolder=tempfolder,
fixedsplits=fixedsplits,
fixed_seed=fixed_seed,
use_fastr=use_fastr,
fastr_plugin=fastr_plugin)
else:
raise ae.WORCKeyError(f'{crossval_type} is not a recognized cross-validation type.')
[classifiers, X_train_set, X_test_set, Y_train_set, Y_test_set,
patient_ID_train_set, patient_ID_test_set, seed_set] =\
zip(*save_data)
# Convert to lists
classifiers = list(classifiers)
X_train_set = list(X_train_set)
X_test_set = list(X_test_set)
Y_train_set = list(Y_train_set)
Y_test_set = list(Y_test_set)
patient_ID_train_set = list(patient_ID_train_set)
patient_ID_test_set = list(patient_ID_test_set)
seed_set = list(seed_set)
panda_labels = ['classifiers', 'X_train', 'X_test', 'Y_train', 'Y_test',
'config', 'patient_ID_train', 'patient_ID_test',
'random_seed', 'feature_labels']
panda_data_temp =\
pd.Series([classifiers, X_train_set, X_test_set, Y_train_set,
Y_test_set, config, patient_ID_train_set,
patient_ID_test_set, seed_set, feature_labels],
index=panda_labels,
name='Constructed crossvalidation')
if modus == 'singlelabel':
i_name = ''.join(i_name)
elif modus == 'multilabel':
i_name = ','.join(i_name)
classifier_labelss[i_name] = panda_data_temp
    panda_data = pd.DataFrame(classifier_labelss)

    return panda_data
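# Illustrative usage sketch (not part of the original module): the shape of the
# inputs crossval() expects, per the docstring above. All names and values below
# are hypothetical placeholders; `config` must be a full WORC configuration dict.
#
#   label_data = {
#       'patient_IDs': ['pat_001', 'pat_002', 'pat_003'],
#       'label': [np.array([0, 1, 1])],   # one array of label values per label name
#       'label_name': [['tumor_grade']],
#   }
#   # one (feature_values, feature_labels) tuple per patient
#   image_features = [([0.1, 2.3], ['f1', 'f2']),
#                     ([0.4, 1.9], ['f1', 'f2']),
#                     ([0.7, 2.1], ['f1', 'f2'])]
#   panda_data = crossval(config, label_data, image_features,
#                         param_grid=param_grid, modus='singlelabel')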
import numpy as np
import pandas as pd
import requests
from io import BytesIO
from datetime import datetime
import os
import random
from back.image_manager import TextImage
dow = ["Пн", "Вт", "Ср", "Чт", "Пт", "Сб", "Вс"]
sf_pairs = ["900-1030", "1040-1210", "1240-1410", "1420-1550", "1620-1750", "1800-1930"]
start_date = "07.02.2022"
today_dow = dow[datetime.now().weekday()]
def get_table(
CACHED_TABLE_PATH: str = None,
CACHED_TABLE_NAME: str = None,
DOWNLOAD_LINK: str = None,
):
    def download_table(DOWNLOAD_LINK):
        response = requests.get(DOWNLOAD_LINK)
        with BytesIO(response.content) as bytes_table:
            # 'Unnamed: 0' is the saved index column, we don't need it
            return pd.io.excel.read_excel(bytes_table).drop("Unnamed: 0", axis=1)
def get_cached_table(CACHED_TABLE_PATH, CACHED_TABLE_NAME):
return pd.read_excel(f"{CACHED_TABLE_PATH}/{CACHED_TABLE_NAME}")
def save_cache(table, CACHED_TABLE_PATH, CACHED_TABLE_NAME):
table.to_excel(f"{CACHED_TABLE_PATH}/{CACHED_TABLE_NAME}", index=False)
table = False
if DOWNLOAD_LINK:
table = download_table(DOWNLOAD_LINK)
elif CACHED_TABLE_PATH and CACHED_TABLE_NAME:
table = get_cached_table(CACHED_TABLE_PATH, CACHED_TABLE_NAME)
else:
table = False
    if isinstance(table, pd.DataFrame) and table.shape[0] and CACHED_TABLE_PATH and CACHED_TABLE_NAME:
save_cache(table, CACHED_TABLE_PATH, CACHED_TABLE_NAME)
return table
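# Illustrative usage sketch (not part of the original module). The URL and cache
# paths below are hypothetical placeholders.
#
#   # First run: download the timetable and cache it locally
#   table = get_table(CACHED_TABLE_PATH="cache",
#                     CACHED_TABLE_NAME="timetable.xlsx",
#                     DOWNLOAD_LINK="https://example.org/timetable.xlsx")
#   # Later runs: reuse the cached copy by passing no DOWNLOAD_LINK
#   table = get_table(CACHED_TABLE_PATH="cache", CACHED_TABLE_NAME="timetable.xlsx")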
def split_table(table, target_group):
global dow
global sf_pairs
def get_start_group_point(table, target_group):
column1 = list(table[table.columns.values[0]].values)
row_index = column1.index("Группа")
column_index = list(table.iloc[row_index, :].values).index(target_group)
return column_index, row_index
def clean_df_table(table):
return table.dropna()
column_index, row_index = get_start_group_point(table, target_group)
# 12*6 - pairs per day * working days
# +2 - useless rows
# +4 - pair_name, type_of_pair, teacher, classroom
target_table = table.iloc[
row_index + 2 : row_index + 2 + 12 * 6, column_index : column_index + 4
].reset_index(drop=True)
new_pair_types = target_table[target_table.columns.values[1]].apply(
lambda x: x if x != "пр" else "сем"
)
target_table[target_table.columns.values[1]] = new_pair_types
target_table_odd = pd.concat(
(
pd.Series(np.array(list(map(lambda x: [x] * 6, dow))).reshape(1, -1)[0]),
pd.Series(sf_pairs * 6),
target_table.iloc[::2].reset_index(drop=True),
),
axis=1,
)
target_table_even = pd.concat(
(
pd.Series(np.array(list(map(lambda x: [x] * 6, dow))).reshape(1, -1)[0]),
            pd.Series(sf_pairs * 6),
            # NOTE: completion assumed by analogy with target_table_odd above (even rows)
            target_table.iloc[1::2].reset_index(drop=True),
        ),
        axis=1,
    )
"""
Visualizer classes for GOES-R series.
Authors:
<NAME>, <NAME> (2021)
"""
import argparse
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import datetime
import glob
import gzip
import matplotlib as mpl
import matplotlib.pyplot as plt
import metpy
from netCDF4 import Dataset
import numpy as np
import pandas as pd
import os
import xarray
class Visualizer(object):
def __init__(self, image_file, measurement_file, band2extract, scene2extract=None,
vmax=0.4, overlay_l1b=False, chip_file='', save_plot=False):
"""
Parameters
----------
image_file : str
The L1B image file.
measurement_file : str
The measurement file.
band2extract : int
The band to extract.
scene2extract : str
            The scene to extract. E.g., 1810-07182020, meaning the scene falling during
            18:10 on 07/18/2020.
        vmax : float
            The maximum value to stretch to. Larger -> less contrast.
        overlay_l1b : {True, False}
            Whether to overlay the L1B image. By default shows the generic
            land/ocean map.
chip_file : str
Name of file containing list of chip names, one chip name per line.
save_plot : {True, False}
Whether to save the plot or just show it.
"""
self.image_file = image_file
self.measurement_file = measurement_file
self.band2extract = band2extract
self.scene2extract = scene2extract
self.vmax = float(vmax)
self.overlay_l1b = overlay_l1b
self.chip_file = chip_file
self.save_plot = save_plot
self.scene = ''
self.nir_flg = False
if self.measurement_file != '':
# Extract satellite name
self.sat = self.measurement_file.split('/')[-1].split('_')[0]
# Extract the metric type
self.metric = self.measurement_file.split('/')[-1].split('_')[1]
# Find coverage
if 'CONUS' in self.measurement_file:
self.coverage = 'CONUS'
else:
self.coverage = 'FULL'
else:
self.sat = ''
self.metric = ''
self.coverage = ''
# Build band name
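        # e.g. band2extract=7 -> '07', band2extract=13 -> '13' (two-digit band string)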
if self.band2extract/10 < 1:
self.band = '0' + str(self.band2extract)
else:
self.band = str(self.band2extract)
def extract_geoloc(self):
""" Extract the geolocation information for the band of interest from the
appropriate Chip DB file.
"""
# Extract the input date and time
if self.scene2extract != None:
date = datetime.datetime.strptime(self.scene2extract.split('-')[1], '%m%d%Y')
time = datetime.datetime.strptime(self.scene2extract.split('-')[0], '%H%M')
date_time = datetime.datetime.strptime(self.scene2extract, '%H%M-%m%d%Y')
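            # e.g. scene2extract='1810-07182020' parses to 18:10 on 2020-07-18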
else:
date = 0
time = 1
# If metric is BBR, need unzip the measurements file
if self.metric == 'BBR':
with gzip.open(self.measurement_file) as f:
                measure_df = pd.read_csv(self.measurement_file)
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
        - dummy transform and reverse_transform functions + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
        - instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
        - instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
        to ``instance._high`` if ``instance._drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
        to ``instance._low`` if ``instance._drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
        - ``_diff_column`` is the constrained column name plus a token, i.e. ``'a#'``.
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
        by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
        - ``_diff_column`` is both column names joined by a token, i.e. ``'a#b'``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
        - Table with two constrained columns at a constant distance of
        exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
        - Table with two constrained columns at a constant distance of
        exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
        - Table with two constrained columns at a constant distance of
        exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
        of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
        - Same table with a diff column of the logarithms of the distances + 1,
        which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data in which some rows respect the desired number of decimal places and some do not (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data in which some rows respect the desired rounding and some do not (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data in which some rows are whole numbers (within tolerance) and some are not (pandas.DataFrame)
Output:
- Series with ``True`` for the valid rows and ``False`` for the rest (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
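# A minimal sketch (not part of the original test module; the helper name is
# hypothetical) of the inverse of ``transform`` above. The ``reverse_transform``
# tests below describe exactly this: apply a sigmoid, then undo the scaling.
def untransform(data, low, high):
    data = 1 / (1 + np.exp(-data))  # sigmoid undoes the logit
    return (data - 0.025) / 0.95 * (high - low) + low  # undo the affine rescaling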
class TestBetween():
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_scalar_column(self):
"""Test the ``Between.transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_scalar(self):
"""Test the ``Between.transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_column(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
"""
Copyright 2022 HSBC Global Asset Management (Deutschland) GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import pytest
import pyratings as rtg
from tests import conftest
@pytest.fixture(scope="session")
def rtg_inputs_longterm():
return pd.DataFrame(
data={
"rtg_sp": ["AAA", "AA-", "AA+", "BB-", "C", np.nan, "BBB+", "AA"],
"rtg_moody": ["Aa1", "Aa3", "Aa2", "Ba3", "Ca", np.nan, np.nan, "Aa2"],
"rtg_fitch": ["AA-", np.nan, "AA-", "B+", "C", np.nan, np.nan, "AA"],
}
)
@pytest.fixture(scope="session")
def rtg_inputs_shortterm():
return pd.DataFrame(
data={
"rtg_sp": ["A-1", "A-3", "A-1+", "D", "B", np.nan, "A-2", "A-3"],
"rtg_moody": ["P-2", "NP", "P-1", "NP", "P-3", np.nan, np.nan, "P-3"],
"rtg_fitch": ["F1", np.nan, "F1", "F3", "F3", np.nan, np.nan, "F3"],
}
)
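# Reading the long-term fixture above: the first security is rated AAA by S&P,
# Aa1 by Moody's and AA- by Fitch; the best of the three on a common scale is
# AAA, which is the first entry of the expected "best_rtg" series in the tests below.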
def test_get_best_rating_longterm_with_explicit_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_longterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="long-term",
)
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_longterm_with_inferring_rating_provider(rtg_inputs_longterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_longterm, tenor="long-term")
expectations = pd.Series(
data=["AAA", "AA-", "AA+", "BB-", "CC", np.nan, "BBB+", "AA"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_explicit_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(
rtg_inputs_shortterm,
rating_provider_input=["SP", "Moody", "Fitch"],
tenor="short-term",
)
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
def test_get_best_rating_shortterm_with_inferring_rating_provider(rtg_inputs_shortterm):
"""Test computation of best ratings on a security (line-by-line) basis."""
actual = rtg.get_best_ratings(rtg_inputs_shortterm, tenor="short-term")
expectations = pd.Series(
data=["A-1", "A-3", "A-1+", "A-3", "A-3", np.nan, "A-2", "A-3"], name="best_rtg"
)
pd.testing.assert_series_equal(actual, expectations)
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 14 12:12:23 2020
Individual programming project: Stock Quotes Application
@author: <NAME> (19202673)
Based on the code from tutorials in MIS41110 Programming for Analytics of UCD, @D. Redmond
Based on the code in tutorial at this website: https://www.bilibili.com/video/BV1da4y147iW (descriptive part)
Based on the code in tutorial at this website: https://mp.weixin.qq.com/s/59FhX-puUUEHQjJKzIDjow (predictive part)
In descriptive analytics, the application can gather historical stock quotes according to customized requirements.
The techniques provided are statistical description, Candlestick chart (daily, weekly, monthly), Moving Average,
Exponentially Weighted Moving Average, Moving Average Convergence Divergence, and Scatter chart.
In predictive analytics, you are able to get the predicted closing price of the stock in the next few days.
Root-mean-square error and r^2 will be provided to judge the credibility of the prediction results.
"""
import sys
import pandas as pd
import matplotlib.pyplot as plt
from mplfinance.original_flavor import candlestick_ochl
import numpy as np
import talib
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers import Dense
# max columns to output
pd.set_option('display.max_columns', 8)
# max rows to output
pd.set_option('display.max_rows', 2000)
# to output normal number rather than Scientific notation
np.set_printoptions(suppress=True)
# to output normal number rather than Scientific notation
pd.set_option('display.float_format', lambda x: '%.2f' % x)
def greeting():
"""
say hello to users
"""
print('Hi, Welcome to Tianyi\'s Stock Quotes Application!')
print('=' * 50)
def function_select():
"""
show choices of this application and for people to choose
:return: string type, the number input by user
"""
print('\nThere are 5 choices for you:')
print('1. Descriptive Analytics')
print('2. Predictive Analytics')
print('3. Export Data')
print('4. Instructions for use')
print('5. Quit')
# get a number from user
number = input('Please enter a number: ')
return number
def instructions_for_users():
"""
Reading User Guide
"""
guide = open('user_manual.txt', encoding='UTF-8')
instructions = guide.read()
print(instructions)
guide.close()
# Judge the user's choice
next_step = input('\nDo you want to start analysing? (Y/N): ').upper()
if next_step == 'Y':
number = function_select()
company = load_company_list()
process_choice(number, company)
else:
quit()
def load_company_list():
"""
get company list for searching Stocks
:return: pandas DataFrame, a company list
"""
return pd.read_csv('./companylist.csv')
def format_company_list(company_list):
"""
format the company list that is used for searching stocks
:param company_list: pandas DataFrame
:return clist: pandas DataFrame, the formatted company list
"""
print('')
print('Search Stocks')
print('=' * 50)
# clist --> create a new DataFrame
clist = company_list
clist = clist.sort_index()  # keep the sorted result; sort_index() returns a new frame
# delete the column which is all NaN
clist.dropna(axis=1, how='all', inplace=True)
clist['index'] = [i for i in range(clist.shape[0])]
return clist
def get_symbol_name(clist):
"""
only to show the 'Symbol' and 'Name' columns avoiding too much information on the output screen
:param clist: pandas DataFrame
:return: pandas DataFrame
"""
return clist[['Symbol', 'Name']]
def search_symbol(company_symbol_name, c_list):
"""
search for symbol according to the input of customers
:param company_symbol_name: pandas DataFrame
(index: Symbol, Name, LastSale, MarketCap, IPOyear, Sector, industry, Summary Quote")
:param c_list: pandas DataFrame
(index: Symbol and Name)
:return: string
"""
val = company_symbol_name
clist = c_list
symbol = input("Please input ticker symbol or company name: ").lower()
filtered_companies = val[
(clist.Symbol.str.lower().str.contains(symbol)) | (clist.Name.str.lower().str.contains(symbol))]
# Determine if there is such a symbol or company name
if len(filtered_companies.index) == 0:
print('There\'s no such symbol or company, please try again!')
# retry recursively and propagate the symbol chosen in the retry back to the caller
return search_symbol(val, clist)
print(filtered_companies)
symbol_chosen = input('\nPlease input the Symbol: ').upper()
return symbol_chosen
# # after searching,do "Query Time Range", call query_time_series to do descriptive and predictive analytics
# query_time_series()
def get_all_historical_quotes(symbol_chosen):
"""
According to the symbol entered by the user, Get all the historical stock quotes from the Internet.
:param symbol_chosen: string
:return: pandas DataFrame
"""
symbol_choice = symbol_chosen
# Get historical stock quotes on the Internet according to the symbol entered by the user
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey=QGO6Z4WQY7X2ZY1V&datatype=csv'.format(
symbol_choice)
data = pd.read_csv(url)
# Process the format of historical quotes obtained
data = data.sort_index()
data['time'] = pd.to_datetime(data['timestamp'])
data = data.sort_values(by='time', ascending=True)
data['index'] = [i for i in range(data.shape[0])]
data['timestamp'] = pd.to_datetime(data['timestamp'])
data = data.set_index('timestamp')
return data
def judgement_1(company, stock_data):
"""
Judge the user's choice
:param company: .csv file
:param stock_data: pandas DataFrame
"""
store = input('\nDo you want to export data? (Y/N): ').upper()
if store == 'Y':
export_data(stock_data)
else:
number = function_select()
company = load_company_list()
process_choice(number, company)
def get_start_day():
"""
get the start date the user want to consult
:return: Date
"""
print('')
print('Query Time Range')
print('=' * 50)
print('Please input a time range')
return input('start time (yyyy-mm-dd): ')
def get_end_day():
"""
get the end date the user want to consult
:return: Date
"""
return input('end time (yyyy-mm-dd): ')
def query_time_series(data, start_date, end_date):
"""
According to the set date range, get the corresponding stock historical quotation
:param data: pandas DataFrame
:param start_date: Date (yyyy-mm-dd)
:param end_date: Date (yyyy-mm-dd)
:return: pandas DataFrame
"""
all_historical_quotes = data
# Obtain historical quotes for the corresponding time period according to the date entered by the user
con1 = all_historical_quotes['time'] >= start_date
con2 = all_historical_quotes['time'] <= end_date
data_chosen = all_historical_quotes[con1 & con2]
return data_chosen
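# Hypothetical usage sketch (ticker and dates invented for illustration):
# quotes = get_all_historical_quotes('AAPL')
# quotes_2020 = query_time_series(quotes, '2020-01-01', '2020-12-31')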
def show_data(data, open_day, end_day, symbol_choice):
"""
the overall describe of selected stock quotes: count, mean, std, 25%, 75%, median
:param data: pandas DataFrame
:param open_day: Date (yyyy-mm-dd)
:param end_day: Date (yyyy-mm-dd)
:param symbol_choice: string
"""
data_chosen = data
data_shown = data_chosen[['open', 'close', 'high', 'low', 'volume']]
print('')
print('The overall description of {}, from {} to {}'.format(symbol_choice, open_day, end_day))
print('*' * 65)
describe = data_shown.describe()
# calculate Coefficient of Variation
mean = describe.loc['mean']
std = describe.loc['std']
coefficient = std / mean  # coefficient of variation is std divided by mean
# change coefficient to a pandas dataframe
coefficient = coefficient.to_frame()
coefficient = coefficient.T
coefficient.index = ['Coefficient of Variation']
# concat describe and coefficient
overall_describe = pd.concat([describe, coefficient], axis=0)
# Visualization
print(overall_describe)
def get_k_type():
"""
get the kind of stock candlestick chart the user want to see
:return: string: 1. daily 2. weekly 3. monthly
"""
K_type = input(
'\nWhat kind of stock candlestick chart do you want to see? \n(1. daily 2. weekly 3. monthly): ')
return K_type
def get_MA_period():
"""
get the time range for Moving Average
:return: int type, a number (time range (unit: day)) which will pass to function moving_average
"""
SAM_period = int(
input('\nPlease enter the time range (unit: day) \nyou expect for Moving Average (enter a number): '))
return SAM_period
def k_daily(data, start_day, end_day, symbol_choice):
"""
plotting Daily Candlestick chart
:param data: pandas DataFrame
:param start_day: Date (yyyy-mm-dd)
:param end_day: Date (yyyy-mm-dd)
:param symbol_choice: string
"""
data_chosen = data
valu_day = data_chosen[['index', 'open', 'close', 'high', 'low']].values
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(20, 8), dpi=80)
candlestick_ochl(axes, valu_day, width=0.2, colorup='r', colordown='g')
# Visualization
plt.xlabel('Date Range from {} to {}'.format(start_day, end_day))
plt.title('Daily Candlestick for {}'.format(symbol_choice))
plt.show()
def k_weekly(data, start_day, end_day, symbol_choice):
"""
plotting weekly Candlestick chart
:param data: pandas DataFrame
:param start_day: Date (yyyy-mm-dd)
:param end_day: Date (yyyy-mm-dd)
:param symbol_choice: string
"""
data_chosen = data
# resample as a stock for each week
stock_week_k = data_chosen.resample('w').last()
stock_week_k['open'] = data_chosen['open'].resample('w').first()
stock_week_k['close'] = data_chosen['close'].resample('w').last()
stock_week_k['high'] = data_chosen['high'].resample('w').max()
stock_week_k['low'] = data_chosen['low'].resample('w').min()
stock_week_k['volume'] = data_chosen['volume'].resample('w').sum()
# reset index
stock_week_k['index'] = [i for i in range(stock_week_k.shape[0])]
# Visualization
valu_week = stock_week_k[['index', 'open', 'close', 'high', 'low']].values
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(20, 8), dpi=80)
candlestick_ochl(axes, valu_week, width=0.2, colorup='r', colordown='g')
plt.xlabel('Date Range from {} to {}'.format(start_day, end_day))
plt.title('Weekly Candlestick for {}'.format(symbol_choice))
plt.show()
def k_monthly(data, start_day, end_day, symbol_choice):
"""
plotting monthly Candlestick chart
:param data: pandas DataFrame
:param start_day: Date (yyyy-mm-dd)
:param end_day: Date (yyyy-mm-dd)
:param symbol_choice: string
"""
data_chosen = data
# resample as a stock for each month
stock_month_k = data_chosen.resample('m').last()
stock_month_k['open'] = data_chosen['open'].resample('m').first()
stock_month_k['close'] = data_chosen['close'].resample('m').last()
stock_month_k['high'] = data_chosen['high'].resample('m').max()
stock_month_k['low'] = data_chosen['low'].resample('m').min()
stock_month_k['volume'] = data_chosen['volume'].resample('m').sum()
# reset index
stock_month_k['index'] = [i for i in range(stock_month_k.shape[0])]
valu_month = stock_month_k[['index', 'open', 'close', 'high', 'low']].values
# Visualization
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(20, 8), dpi=80)
candlestick_ochl(axes, valu_month, width=0.6, colorup='r', colordown='g')
plt.xlabel('Date Range from {} to {}'.format(start_day, end_day))
plt.title('Monthly Candlestick for {}'.format(symbol_choice))
plt.show()
def moving_average(data, SAM_period, symbol_choice):
"""
plotting moving average and Exponentially Weighted Moving Average chart
:param data: pandas DataFrame
:param SAM_period: int
:param symbol_choice: string
:return:
"""
data_chosen = data
# Simple Moving Average
pd.Series.rolling(data_chosen['close'], window=SAM_period).mean().plot(figsize=(20, 8),
label='Simple Moving Average')
# Exponentially Weighted Moving Average
pd.Series.ewm(data_chosen['close'], span=SAM_period).mean().plot(figsize=(20, 8),
label='Exponentially Weighted Moving '
'Average')
# Visualization
plt.legend(loc='best')
plt.title('{}'.format(symbol_choice))
plt.show()
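# Equivalent plain-pandas one-liners (a sketch, not used by the function above):
# data_chosen['close'].rolling(window=SAM_period).mean()
# data_chosen['close'].ewm(span=SAM_period).mean()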
def MACD(data, start_day, end_day, symbol_choice):
"""
plotting Moving Average Convergence Divergence chart
:param data: pandas DataFrame
:param start_day: Date (yyyy-mm-dd)
:param end_day: Date (yyyy-mm-dd)
:param symbol_choice: string
"""
data_chosen = data
# organize values of data_chosen
val = data_chosen[['index', 'open', 'close', 'high', 'low']]
val['index'] = [i for i in range(val.shape[0])]
# plotting
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(20, 8), dpi=80)
candlestick_ochl(axes, val.values, width=0.2, colorup='r', colordown='g')
# MACD-->dif, macdsignal-->dea, macdhist-->macdbar
dif, dea, macdbar = talib.MACD(val['close'].values, fastperiod=12, slowperiod=26, signalperiod=9)
# x-aix
x = [i for i in range(val.shape[0])]
# If it is a positive value, the output is a red bar. If it is negative, the output is a green bar
bar1 = np.where(macdbar > 0, macdbar, 0)
bar2 = np.where(macdbar < 0, macdbar, 0)
plt.bar(x, bar1, color='r', label='up')
plt.bar(x, bar2, color='g', label='down')
# Visualization
plt.plot(x, dif, label='MACD')
plt.plot(x, dea, label='MACD_signal')
plt.xlabel('Date Range from {} to {}'.format(start_day, end_day))
plt.title('Moving Average Convergence Divergence for {}'.format(symbol_choice))
plt.legend(loc='best')
plt.show()
def scatter(data, symbol_choice):
"""
plotting Scatter plots for each pair of variables
:param data: pandas DataFrame
:param symbol_choice: string
:return:
"""
# organize values of data_chosen
data_chosen = data
scatter_data = data_chosen.sort_index()
scatter_data['index'] = [i for i in range(scatter_data.shape[0])]
frame = scatter_data[['open', 'close', 'high', 'low', 'volume']]
# Visualization
pd.plotting.scatter_matrix(frame, figsize=(15, 15))
plt.title('Scatter plot of each indicator pairwise correlation for {}'.format(symbol_choice))
plt.legend(loc='best')
plt.show()
def k_chart(K_type, data, start_day, end_day, symbol_choice):
"""
Get the return value in function get_k_type, and determine the type of Stock Candlestick chart to plot
:param K_type: string
:param data: pandas DataFrame
:param start_day: Date(yyyy-mm-dd)
:param end_day: Date (yyyy-mm-dd)
:param symbol_choice: string
"""
if K_type == '1':
k_daily(data, start_day, end_day, symbol_choice)
elif K_type == '2':
k_weekly(data, start_day, end_day, symbol_choice)
elif K_type == '3':
k_monthly(data, start_day, end_day, symbol_choice)
else:
print('Invalid choice! Try again please.')
print('')
k_chart(get_k_type(), data, start_day, end_day, symbol_choice)  # ask again instead of recursing forever with the same invalid choice
def judgement_2():
"""
Judge the user's choice
"""
next_step = input('\nDo you want to analyse the stock of another company? (Y/N): ').upper()
if next_step == 'Y':
number = function_select()
company = load_company_list()
process_choice(number, company)
else:
quit()
def get_size(data):
"""
get the size of the training set: 80% of the total number of historical stock quotes
:param data: pandas DataFrame
:return: int
"""
data_predict = data
return int(data_predict.shape[0] * 0.8)
def get_predict_data(data):
"""
sort the obtained pandas DataFrame
:param data: pandas DataFrame
:return: pandas DataFrame
"""
all_quotes = data
data_predict = all_quotes[['open', 'close', 'high', 'low', 'volume']]
data_predict = data_predict.sort_index(axis=0, ascending=True)
return data_predict
def get_days_to_predict():
"""
Users enter how many days they want to predict backwards
:return: int
"""
return int(input('Please input how many days do you want to predict (a number): '))
def get_time_stamp():
"""
based on how many days do the user prefer to predict the next closing price.
:return: int
"""
return int(
input('Based on how many days do you want to predict the next closing price? (enter a number): '))
def get_train_data_set(data, data_size, time_stamp):
"""
Divide the acquired data into training set and validation set, 80% of the historical stock quotes
is used as the training set
:param data: pandas DataFrame
:param data_size: int
:param time_stamp: int
:return: DataFrame
"""
data_predict = data
return data_predict[0:data_size + time_stamp]
def get_valid_data_set(data, data_size, time_stamp):
"""
Divide the acquired data into training set and validation set, 20% of the historical stock quotes
is used as the validation set
:param data: pandas DataFrame
:param data_size: int
:param time_stamp: int
:return: pandas DataFrame
"""
data_predict = data
return data_predict[data_size - time_stamp:]
def get_scaler():
"""
Normalization parameter
:return: MinMaxScaler()
"""
return MinMaxScaler(feature_range=(0, 1))
def get_scaled_training_data(scaler, train_data_set):
"""
Normalize the training set
:param scaler: MinMaxScaler()
:param train_data_set: pandas DataFrame
:return: pandas DataFrame
"""
return scaler.fit_transform(train_data_set)
def get_scaled_validation_data(scaler, valid_data_set):
"""
Normalize the validation set
:param scaler: MinMaxScaler()
:param valid_data_set: pandas DataFrame
:return: pandas DataFrame
"""
return scaler.fit_transform(valid_data_set)
def train_data_x(scaled_data, time_stamp, train_data):
"""
get scaled training data and convert it to a numpy array of sliding windows
:param scaled_data: pandas DataFrame
:param time_stamp: int
:param train_data: pandas DataFrame
:return: numpy array
"""
# get_scaled_training_data
train = train_data
x_train = []
for i in range(time_stamp, len(train)):
x_train.append(scaled_data[i - time_stamp:i])
x_train = np.array(x_train)
return x_train
def train_data_y(scaled_data, time_stamp, train_data):
"""
get scaled training data and convert the target values to a numpy array
:param scaled_data: pandas DataFrame
:param time_stamp: int
:param train_data: pandas DataFrame
:return: numpy array
"""
y_train = []
for i in range(time_stamp, len(train_data)):
y_train.append(scaled_data[i, 1])  # column 1 = 'close' in ['open', 'close', 'high', 'low', 'volume'], the price the model predicts
y_train = np.array(y_train)
return y_train
def valid_data_x(scaled_data, time_stamp, valid_data):
"""
get scaled validation data and convert it to a numpy array of sliding windows
:param scaled_data: pandas DataFrame
:param time_stamp: int
:param valid_data: pandas DataFrame
:return: numpy array
"""
x_valid = []
for i in range(time_stamp, len(valid_data)):
x_valid.append(scaled_data[i - time_stamp:i])
x_valid = np.array(x_valid)
return x_valid
def valid_data_y(scaled_data, time_stamp, valid_data):
"""
get scaled validation data, and create a numpy array
:param scaled_data: pandas DataFrame
:param time_stamp: int
:param valid_data: pandas DataFrame
:return: numpy array
"""
y_valid = []
for i in range(time_stamp, len(valid_data)):
y_valid.append(scaled_data[i, 1])  # column 1 = 'close', matching the closing-price target used for training
y_valid = np.array(y_valid)
return y_valid
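# Shape sketch for the arrays built above (spelled out as an assumption, it is not
# asserted anywhere in the original script): each sample is a window of
# ``time_stamp`` rows over the 5 scaled features open/close/high/low/volume, so
# x_train.shape == (n_train_samples, time_stamp, 5) and y_train.shape == (n_train_samples,).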
def LSTM_model(scaler, x_train, y_train, valid_data, x_valid):
"""
predicting the closing price by using Long short-term memory (an artificial recurrent neural network)
:param scaler: MinMaxScaler()
:param x_train: numpy array
:param y_train: numpy array
:param valid_data: pandas DataFrame
:param x_valid: numpy array
:return: numpy array: the predicting closing price
"""
# Hyper-parameter settings
epochs = 3
batch_size = 16
# LSTM Parameters: return_sequences=True.
# The LSTM output is a sequence. The default is False, and a value is output.
# input_dim:Enter the dimension of a single sample feature
# input_length:the length of time entered
model = Sequential()
model.add(LSTM(units=100, return_sequences=True, input_dim=x_train.shape[-1], input_length=x_train.shape[1]))
model.add(LSTM(units=50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1)
# Forecast stock closing price
closing_price = model.predict(x_valid)
scaler.fit_transform(pd.DataFrame(valid_data['close'].values))
# Denormalize
closing_price = scaler.inverse_transform(closing_price)
return closing_price
def denormalize_valid_y(scaler, y_valid):
"""
denormalize the normalized y_valid
:param scaler: MinMaxScaler()
:param y_valid: numpy array
:return: numpy array
"""
return scaler.inverse_transform([y_valid])
def root_mean_squared(y_valid, closing_price):
"""
root mean squared of predicted pricing
:param y_valid: numpy array
:param closing_price: numpy array
"""
print('\nRoot Mean Squared Error and R^2: ')
RMSE = np.sqrt(np.mean(np.power((y_valid - closing_price), 2)))
print('RMSE = {}'.format(RMSE))
def r2(y_valid, closing_price):
"""
r^2 of predicted pricing
:param y_valid: numpy array
:param closing_price: numpy array
"""
r2 = 1 - (np.sum(np.power((y_valid - closing_price), 2))) / (
np.sum(np.power((y_valid - np.mean(y_valid)), 2)))  # SS_tot is taken around the mean of the observed values
print('r^2 = {}'.format(r2))
def format(y_valid, closing_price):
"""
combine y_valid and closing_price, then convert the result to a pandas DataFrame
:param y_valid: numpy array
:param closing_price: numpy array
:return: pandas DataFrame
"""
dict_data = {
'predictions': closing_price.reshape(1, -1)[0],
'close': y_valid[0]
}
data_pd = pd.DataFrame(dict_data)
import pandas as pd
import numpy as np
import scikits.bootstrap as sci
def time_across_length(runs, ids, times, sequence_lengths):
df_min = pd.DataFrame({"run": runs, "time": times}, index=ids)
df_min = df_min.pivot(index=df_min.index, columns="run")["time"]
df_min = df_min.min(axis=1)
df_min = pd.DataFrame(df_min, columns=["time"])
df_lengths = pd.DataFrame.from_dict(sequence_lengths, orient="index")
df_lengths.columns = ["length"]  # pandas 0.22 from_dict does not allow setting the column name directly
def fill_not_solved(df):
# unsolved instances stay NaN; give each one an increasing timeout penalty (limit=1 fills one NaN per pass)
for count in range(1, len(df_lengths)):
df = df.fillna(5000 + 500 * count ** 1.6, limit=1)
return df
length_grouped = df_lengths.join(df_min, how="outer").groupby("length")
length_grouped = length_grouped.transform(fill_not_solved)
df = df_lengths.join(length_grouped, how="outer")
df = df.set_index("length")
return df.sort_index()
def _add_timeout_row(df, timeout):
last_row = df.tail(1)
last_row.index = [timeout]
return df.append(last_row)
def _add_start_row(df):
start_row = pd.DataFrame({column: [0] for column in df.columns}, index=[1e-10])
"""
This script contains all necessary code to extract and convert all raw RESTORE predictions into a more uniform format using pandas multicolumn dataframes.
The resulting dataframe is written to a .csv and carries a five-level column header (a pandas MultiIndex).
To load the .csv file correctly using pandas: RESTORE_df = pd.read_csv(path_to_file+'all_RESTORE_simulations.csv', header=[0,1,2,3,4])
Indexing is performed in the following way: RESTORE_df['UGent','v7.0','S1','incidences','mean'] returns the daily hospitalizations (incidences) in scenario S1 of report v7.0 by UGent.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
import os
import pandas as pd
import numpy as np
#########################
## Setup preliminaries ##
#########################
# Path to raw data
abs_dir = os.path.dirname(__file__)
raw_dir = os.path.join(abs_dir,'../../data/raw/RESTORE/simulations/')
iterim_dir = os.path.join(abs_dir,'../../data/interim/RESTORE/')
# Pre-allocation of results dataframe
index = pd.date_range(start='2020-09-01', end='2021-09-01')
columns = [[], [], [], [], []]  # five empty levels, one per column name below
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["author", "report version", "scenario", "inc/load", "statistic"])
df_RESTORE = pd.DataFrame(index=index, columns=columns)
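# Hypothetical usage sketch for the finished file (mirrors the module docstring;
# index_col=0 is an assumption about where the dates end up):
# RESTORE_df = pd.read_csv(iterim_dir + 'all_RESTORE_simulations.csv', header=[0, 1, 2, 3, 4], index_col=0)
# RESTORE_df['UGent', 'v7.0', 'S1', 'incidences', 'mean']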
#################
## RESTORE 4.2 ##
#################
folder = 'RESTORE_4.2/'
# ---------
# Load data
# ---------
# Load SIMID model
SIMID = pd.read_csv(raw_dir+folder+'PredictionsUHasseltmodel.csv', parse_dates=['Date'])
SIMID.index = SIMID['Date']
SIMID.pop('Date')
SIMID = SIMID.rename(columns={SIMID.columns[0]:'S1_load_mean'})
SIMID = SIMID.rename(columns={SIMID.columns[1]:'S1_load_LL'})
SIMID = SIMID.rename(columns={SIMID.columns[2]:'S1_load_UL'})
SIMID = SIMID.rename(columns={SIMID.columns[3]:'S2_load_mean'})
SIMID = SIMID.rename(columns={SIMID.columns[4]:'S2_load_LL'})
SIMID = SIMID.rename(columns={SIMID.columns[5]:'S2_load_UL'})
SIMID = SIMID.rename(columns={SIMID.columns[6]:'S3_load_mean'})
SIMID = SIMID.rename(columns={SIMID.columns[7]:'S3_load_LL'})
SIMID = SIMID.rename(columns={SIMID.columns[8]:'S3_load_UL'})
# Load UNamur model
UNamur = pd.read_csv(raw_dir+folder+'PredictionsUNamurmodel.csv')
UNamur.Date = pd.to_datetime(UNamur.Date, format='%d/%m/%y')
UNamur.index = UNamur['Date']
UNamur.pop('Date')
UNamur = UNamur.rename(columns={UNamur.columns[0]:'S1_incidences_mean'})
UNamur = UNamur.rename(columns={UNamur.columns[1]:'S1_incidences_LL'})
UNamur = UNamur.rename(columns={UNamur.columns[2]:'S1_incidences_UL'})
UNamur = UNamur.rename(columns={UNamur.columns[3]:'S1_load_mean'})
UNamur = UNamur.rename(columns={UNamur.columns[4]:'S1_load_LL'})
UNamur = UNamur.rename(columns={UNamur.columns[5]:'S1_load_UL'})
UNamur = UNamur.rename(columns={UNamur.columns[6]:'S2_incidences_mean'})
UNamur = UNamur.rename(columns={UNamur.columns[7]:'S2_incidences_LL'})
UNamur = UNamur.rename(columns={UNamur.columns[8]:'S2_incidences_UL'})
UNamur = UNamur.rename(columns={UNamur.columns[9]:'S2_load_mean'})
UNamur = UNamur.rename(columns={UNamur.columns[10]:'S2_load_LL'})
UNamur = UNamur.rename(columns={UNamur.columns[11]:'S2_load_UL'})
# Load VUB model
VUB = pd.read_csv(raw_dir+folder+'PredictionsVUBmodel.csv',skiprows=1,decimal=",")
VUB.Date = pd.to_datetime(VUB.Date, format='%d/%m/%y')
VUB.index = VUB['Date']
VUB.pop('Date')
VUB = VUB.rename(columns={VUB.columns[0]:'S1_load_mean'})
VUB = VUB.rename(columns={VUB.columns[2]:'S1_load_LL'})
VUB = VUB.rename(columns={VUB.columns[3]:'S1_load_UL'})
# -----------
# Assign data
# -----------
authors = ['SIMID','UNamur','VUB']
authors_df = [SIMID, UNamur, VUB]
report_v = 'v4.2'
scenarios = ['1','2','3']
statistics = ['mean', 'LL', 'UL']
for idx, author in enumerate(authors):
for scenario in scenarios:
for statistic in statistics:
if author == 'VUB':
if scenario == '1':
df_RESTORE[author,report_v,"S"+scenario,"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
elif author == 'SIMID':
df_RESTORE[author,report_v,"S"+scenario,"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
elif author == 'UNamur':
if ((scenario == '1') | (scenario == '2')):
df_RESTORE[author,report_v,"S"+scenario,"incidences", statistic] = authors_df[idx]['S'+scenario+'_incidences_'+statistic]
df_RESTORE[author,report_v,"S"+scenario,"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
#################
## RESTORE 5.0 ##
#################
folder = 'RESTORE_5.0/'
# ---------
# Load data
# ---------
# Load SIMID model
SIMID = pd.read_csv(raw_dir+folder+'predictions_v5_UHasselt.csv', parse_dates=['Date'])
SIMID.index = SIMID['Date']
SIMID.pop('Date')
# Load UGent model
UGent = pd.read_csv(raw_dir+folder+'predictions_UGent.csv')
UGent = UGent.rename(columns={UGent.columns[0]:'Date'})
UGent.Date = pd.to_datetime(UGent.Date)
UGent.index = UGent['Date']
UGent.pop('Date')
# Load UNamur model
UNamur = pd.read_csv(raw_dir+folder+'predictions_Unamur_2410.csv')
UNamur.Date = pd.to_datetime(UNamur.Date, format='%d/%m/%y')
UNamur.index = UNamur['Date']
UNamur.pop('Date')
# Load VUB1 model
VUB1 = pd.read_excel(raw_dir+folder+'VUB_HOSPpredictions2610Fagg.xlsx', skiprows=0)
VUB1.Date = pd.to_datetime(VUB1.Date)
VUB1.columns = ['Date','Observations','Fit', 'S1_load_median','S1_load_mean', 'S1_load_LL', 'S1_load_UL']
VUB1.index = VUB1['Date']
VUB1.pop('Date')
# Load VUB2 model
VUB2 = pd.read_excel(raw_dir+folder+'VUB_HOSPpredictions0112.xlsx', skiprows=0)
VUB2.Date = pd.to_datetime(VUB2.Date)
#VUB2.pop("Unnamed: 7")
#VUB2.pop("Unnamed: 8")
#VUB2.pop("Unnamed: 9")
#VUB2.pop("Unnamed: 10")
VUB2.columns = ['Date','Observations','Fit', 'S1_load_median','S1_load_mean', 'S1_load_LL', 'S1_load_UL']
VUB2.index = VUB2['Date']
VUB2.pop('Date')
# -----------
# Assign data
# -----------
authors = ['UGent','SIMID','UNamur','VUB']
authors_df = [UGent, SIMID, UNamur, VUB]
report_v = 'v5.0'
scenarios = ['1','2','3','4']
statistics = ['mean', 'median', 'LL', 'UL']
for idx, author in enumerate(authors):
for scenario in scenarios:
for statistic in statistics:
if author == 'VUB':
if scenario == '1':
df_RESTORE[author,report_v,"S"+scenario+'_2610',"load", statistic] = VUB1['S'+scenario+'_load_'+statistic]
df_RESTORE[author,report_v,"S"+scenario+'_0111',"load", statistic] = VUB2['S'+scenario+'_load_'+statistic]
elif author == 'UGent':
df_RESTORE[author,report_v,"S"+scenario,"incidences", statistic] = authors_df[idx]['S'+scenario+'_incidences_'+statistic]
if statistic != 'median':
df_RESTORE[author,report_v,"S"+scenario,"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
else:
df_RESTORE[author,report_v,"S"+scenario,"incidences", statistic] = authors_df[idx]['S'+scenario+'_incidences_'+statistic]
df_RESTORE[author,report_v,"S"+scenario,"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
#################
## RESTORE 6.0 ##
#################
folder = 'RESTORE_6.0/'
# ---------
# Load data
# ---------
# Load SIMID model
SIMID = pd.read_csv(raw_dir+folder+'UHasselt_predictions_v6.csv', parse_dates=['Date'])
SIMID.index = SIMID['Date']
SIMID.pop('Date')
# Load UGent model
UGent = pd.read_csv(raw_dir+folder+'UGent_restore_v6.csv', parse_dates=['Date'])
#UGent = UGent.rename(columns={UGent.columns[0]:'Date'})
UGent.Date = pd.to_datetime(UGent.Date)
UGent.index = UGent['Date']
UGent.pop('Date')
# Load UNamur model
UNamur = pd.read_csv(raw_dir+folder+'Unamur_Model_allscenarios_new.csv', parse_dates=['Date'], skipinitialspace=True)
UNamur.Date = pd.to_datetime(UNamur.Date, format='%d/%m/%y')
UNamur.index = UNamur['Date']
UNamur.pop('Date')
# Load VUB model
VUB = pd.read_excel(raw_dir+folder+'VUB_Hosp1412.xlsx', skiprows=0)
VUB.Date = pd.to_datetime(VUB.Date)
VUB.columns = ['Date','Observations','Fit', 'S1a_load_median','S1a_load_mean', 'S1a_load_LL', 'S1a_load_UL']
VUB.index = VUB['Date']
VUB.pop('Date')
# Load ULB model
# Scenario 1a
ULB_1a = pd.read_csv(raw_dir+folder+'S1a_ULB_model_1213.csv')
ULB_1a = ULB_1a.rename(columns={ULB_1a.columns[0]:'Date'})
ULB_1a['Date'] = pd.date_range('2020-03-01', periods=len(ULB_1a.Date), freq='1D')
ULB_1a.index = ULB_1a['Date']
ULB_1a.pop('Date')
# XMas scenario 1
ULBXmas1 = pd.read_csv(raw_dir+folder+'SXmas1_ULB_model_1213.csv')
ULBXmas1 = ULBXmas1.rename(columns={ULBXmas1.columns[0]:'Date'})
ULBXmas1['Date'] = pd.date_range('2020-03-01', periods=len(ULBXmas1.Date), freq='1D')
ULBXmas1.index = ULBXmas1['Date']
ULBXmas1.pop('Date')
# XMas scenario 2
ULBXmas2 = pd.read_csv(raw_dir+folder+'SXmas2_ULB_model_1213.csv')
ULBXmas2 = ULBXmas2.rename(columns={ULBXmas2.columns[0]:'Date'})
ULBXmas2['Date'] = pd.date_range('2020-03-01', periods=len(ULBXmas2.Date), freq='1D')
ULBXmas2.index = ULBXmas2['Date']
ULBXmas2.pop('Date')
# XMas scenario 3
ULBXmas3 = pd.read_csv(raw_dir+folder+'SXmas3_ULB_model_1213.csv')
ULBXmas3 = ULBXmas3.rename(columns={ULBXmas3.columns[0]:'Date'})
ULBXmas3['Date'] = pd.date_range('2020-03-01', periods=len(ULBXmas3.Date), freq='1D')
ULBXmas3.index = ULBXmas3['Date']
ULBXmas3.pop('Date')
# -----------
# Assign data
# -----------
authors = ['UGent','SIMID','UNamur','VUB']
authors_df = [UGent, SIMID, UNamur, VUB]
report_v = 'v6.0'
scenarios = ['1a','2a','2b1','2c1','3']
scenarios_mapped = ['1','2a','2b','2c','3']
statistics = ['mean', 'median', 'LL', 'UL']
for idx, author in enumerate(authors):
for jdx, scenario in enumerate(scenarios):
for statistic in statistics:
if author == 'VUB':
if scenario == '1a':
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
else:
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"incidences", statistic] = authors_df[idx]['S'+scenario+'_incidences_'+statistic]
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
author = 'ULB'
authors_df = [ULB_1a, ULBXmas1, ULBXmas2, ULBXmas3]
scenarios = ['S1a', 'SXmas1', 'SXmas2', 'SXmas3']
scenarios_mapped = ['S1','S_Xmas1', 'S_Xmas2', 'S_Xmas3']
for idx, scenario in enumerate(scenarios):
for statistic in statistics:
if statistic != 'median':
df_RESTORE[author,report_v, scenarios_mapped[idx], "incidences", statistic] = authors_df[idx][scenario + '_incidences_' +statistic].rolling(window=7, center=True).mean()
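# Note: for ULB only the non-median statistics are stored, after a 7-day centred rolling mean;
# with window=7, center=True and the default min_periods, the first and last three days of each
# smoothed series are NaN.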
#################
## RESTORE 6.1 ##
#################
folder = 'RESTORE_6.1/'
# ---------
# Load data
# ---------
# Load SIMID model
SIMID = pd.read_csv(raw_dir+folder+'UHasselt_predictions_v6_1.csv', parse_dates=['Date'])
SIMID.index = SIMID['Date']
SIMID.pop('Date')
# Load UGent model
UGent = pd.read_csv(raw_dir+folder+'UGent_restore_v6.1.csv', parse_dates=['Date'])
#UGent = UGent.rename(columns={UGent.columns[0]:'Date'})
UGent.Date = pd.to_datetime(UGent.Date)
UGent.index = UGent['Date']
UGent.pop('Date')
# Load UNamur model
UNamur = pd.read_csv(raw_dir+folder+'Unamur_Model_allscenarios_6.1.csv', parse_dates=['Date'], skipinitialspace=True)
UNamur.Date = pd.to_datetime(UNamur.Date, format='%d/%m/%y')
UNamur.index = UNamur['Date']
UNamur.pop('Date')
# Load VUB model
VUB = pd.read_excel(raw_dir+folder+'VUB_HOSPpredictions150121_Rapport6-1.xlsx', skiprows=0)
VUB.Date = pd.to_datetime(VUB.Date)
#VUB.pop("Unnamed: 7")
#VUB.pop("Unnamed: 8")
#VUB.pop("Unnamed: 9")
#VUB.pop("Unnamed: 10")
#VUB.pop("Unnamed: 11")
VUB.columns = ['Date','Observations','Fit', 'S1_load_median','S1_load_mean', 'S1_load_LL', 'S1_load_UL']
VUB.index = VUB['Date']
VUB.pop('Date')
# -----------
# Assign data
# -----------
authors = ['UGent','SIMID','UNamur','VUB']
authors_df = [UGent, SIMID, UNamur, VUB]
report_v = 'v6.1'
scenarios = ['1','2a','2b','2c']
scenarios_mapped = ['1','2a','2b','2c']
statistics = ['mean', 'median', 'LL', 'UL']
for idx, author in enumerate(authors):
for jdx, scenario in enumerate(scenarios):
for statistic in statistics:
if author == 'VUB':
if scenario == '1':
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
else:
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"incidences", statistic] = authors_df[idx]['S'+scenario+'_incidences_'+statistic]
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
#################
## RESTORE 7.0 ##
#################
folder = 'RESTORE_7.0/'
# ---------
# Load data
# ---------
# Load SIMID model
SIMID = pd.read_csv(raw_dir+folder+'UHasselt_predictions_v7_np_full.csv', parse_dates=['Date'])
SIMID.index = SIMID['Date']
SIMID.pop('Date')
# Load UGent model
UGent = pd.read_csv(raw_dir+folder+'UGent_restore_v7.csv', parse_dates=['Date'])
UGent.Date = pd.to_datetime(UGent.Date)
UGent.index = UGent['Date']
UGent.pop('Date')
# Load UNamur model
UNamur = pd.read_csv(raw_dir+folder+'Unamur_Model_allscenarios_70.csv', parse_dates=['Date'], skipinitialspace=True)
UNamur.index = UNamur['Date']
UNamur.pop('Date')
# Load VUB model
VUB = pd.read_excel(raw_dir+folder+'VUB_prediction_March9th.xlsx',skiprows=1)
VUB.Date = pd.to_datetime(VUB.Date, format='%d/%m/%y')
VUB.index = VUB['Date']
VUB.pop('Date')
VUB.pop("Unnamed: 6")
VUB.pop("Unnamed: 7")
VUB.pop("Unnamed: 8")
VUB.pop("Unnamed: 9")
VUB.pop("Unnamed: 10")
VUB.pop("Unnamed: 11")
VUB.pop("Unnamed: 12")
VUB.pop("Unnamed: 13")
VUB = VUB.rename(columns={VUB.columns[2]:'S1b_load_mean'})
VUB = VUB.rename(columns={VUB.columns[3]:'S1b_load_LL'})
VUB = VUB.rename(columns={VUB.columns[4]:'S1b_load_UL'})
# -----------
# Assign data
# -----------
authors = ['UGent','SIMID','UNamur']
authors_df = [UGent, SIMID, UNamur]
report_v = 'v7.0'
scenarios = ['1a','1b','1c','2a','2b','2c','3a','3b','3c','4a','4b','4c']
scenarios_mapped = scenarios
statistics = ['mean', 'median', 'LL', 'UL']
for idx, author in enumerate(authors):
for jdx, scenario in enumerate(scenarios):
for statistic in statistics:
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"incidences", statistic] = authors_df[idx]['S'+scenario+'_incidences_'+statistic]
df_RESTORE[author,report_v,"S"+scenarios_mapped[jdx],"load", statistic] = authors_df[idx]['S'+scenario+'_load_'+statistic]
for statistic in statistics:
if not statistic == 'median':
df_RESTORE['VUB','v7.0','S1b','load',statistic] = VUB['S1b_load_'+statistic]
#################
## RESTORE 8.0 ##
#################
folder = 'RESTORE_8.0/'
# ---------
# Load data
# ---------
# Load SIMID model
SIMID = pd.read_csv(raw_dir+folder+'restore8_SIMID.csv', parse_dates=['Date'])
SIMID.Date = pd.date_range(start='2020-03-01',periods=550)
SIMID.index = SIMID['Date']
SIMID.pop('Date')
# Load UGent model
UGentmodel_or =
|
pd.read_csv(raw_dir+folder+'RESTORE8_UGent_simulations.csv', header=[0,1,2,3,4])
|
pandas.read_csv
|
#%% Importing modules
import smtplib
import pandas as pd
import numpy as np
import datetime as dt
# import pandas.stats.moments as st  # pandas.stats.moments no longer exists; rolling stats are DataFrame methods
from pandas import ExcelWriter
import matplotlib.pyplot as plt
import os
import quandl as qd
import seaborn as sns
from scipy.stats import skewnorm as skn
# Function for saving excel files
def save_xls(list_dfs, xls_path, sheet_names):
writer = ExcelWriter(xls_path)
for n, df in enumerate(list_dfs):
df.to_excel(writer, sheet_names[n])
writer.save()
return
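# Hedged usage sketch for save_xls (hypothetical dataframes and sheet names, not from this script):
# save_xls([df_prices, df_vol], 'output.xlsx', ['Prices', 'Volatility'])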
#%% Reading in Data
# Reading VIX data from CBOE directly
# VIX is stored as 3 separate files on CBOE's website
# 2004 to present : http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vixcurrent.csv
# 1990 to 2003 : http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vixarchive.xls
# 1986 to 2003 VXO: http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxoarchive.xls
# First read raw files directly
vix_present = pd.read_csv('http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vixcurrent.csv').dropna()
# vix_old = pd.read_excel('http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vixarchive.xls').dropna()
vxo_old = pd.read_excel('http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxoarchive.xls').dropna()
# Function for cleaning CBOE VIX data
def clean_cboe(df):
df.columns = ['Date','Open','High','Low','Close']
df = df[1:]
df['Date'] = pd.to_datetime(df['Date'])
df = df.set_index(pd.DatetimeIndex(df['Date']))
return df[['Open','High','Low','Close']]
# Applying clean_cboe to vix data
vix_present = clean_cboe(vix_present)
# vix_old = clean_cboe(vix_old)
vxo_old = clean_cboe(vxo_old)
# Currently the vix_old dataframe doesn't have the Open prices so VXO will be used to proxy VIX prior
# to 2003
vix = pd.concat([vxo_old,vix_present],axis = 0)
#%% Reading SKEW Index data directly from CBOE
skew = pd.read_csv('https://www.cboe.com/publish/scheduledtask/mktdata/datahouse/skewdailyprices.csv')
skew.columns = ['Date','Skew','na1','na2']
skew = skew[1:]
skew['Date'] = pd.to_datetime(skew['Date'])
skew = skew.set_index(pd.DatetimeIndex(skew['Date']))[['Skew']]
skew['skew'] = -(pd.to_numeric(skew['Skew'], downcast='float') - 100)/10
del skew['Skew']
#%% Reading in SPX Data
os.chdir('C:\\Users\\Fang\\Desktop\\Python Trading\\SPX Option Backtester\\spx_options_backtesting\\SPX Data')
spx =
|
pd.read_csv('SPX.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 20:13:44 2020
@author: Adam
"""
#%% Heatmap generator "Barcode"
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
def join_cols(row):
return ''.join(list(row))
def find_favor(seq):
t = []
for m in re.finditer(seq, DNA):
t += [m.start()]
return t
DNA = np.loadtxt('./data/DNA.txt', str)
DNA = ''.join(DNA)
print('DNA Length = {} '.format(len(DNA)) )
start_idxs = []
for m in re.finditer('GTC', DNA):
start_idxs += [m.start()]
start_idxs = np.array(start_idxs)
df = pd.DataFrame()
df['loc'] = np.arange(len(DNA))
df['start_ind'] = 0
df.loc[start_idxs,'start_ind'] = 1
favor = pd.read_csv('./data/favor_seqs.csv')
gtc_loc = list(favor.iloc[0,:])[0].find('GTC')
red_idxs = []
for detsize in range(3,4):
dets = favor['seq'].str[ gtc_loc-detsize:gtc_loc + 3 + detsize]
dets = list(np.unique(dets))
detslocs = list(map(find_favor, dets))
detslocs = [x for x in detslocs if len(x) > 1]
for tlocs in detslocs:
mean_dist = np.mean(np.diff(tlocs))
median_dist = np.median(np.diff(tlocs))
if(mean_dist > 1000 and mean_dist < 6000
or
median_dist > 1000 and median_dist < 6000):
red_idxs += [tlocs]
red_idxs = [item for sublist in red_idxs for item in sublist]
plt.figure(figsize=(16,4))
plt.bar(start_idxs, [0.3]*len(start_idxs), width=64, color='black', alpha=0.8)
plt.bar(red_idxs, [1]*len(red_idxs), width=64, color='red')
plt.ylim([0,1])
plt.xlim([0,len(DNA)])
plt.xlabel('DNA nucleotide index')
plt.yticks([])
plt.xticks([])
plt.title('\"Interesting\" Sequences')
plt.legend(['GTC Locations','Interesting Frequency Locations'], facecolor=(1,1,1,1), framealpha=0.98 )
plt.savefig('./out/favor_seqs_k_3.png')
plt.show()
#%% Prim VS Primon when POLY is saturated
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def ms(t):
return t/np.max(t)
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-4]) ))
tcols = df.columns
tcols = list(tcols[:-4]) + ['poly','prim','primo','seq']
df.columns = tcols
df['primo-prim'] = df['primo'] - df['prim']
labels = ['poly','primo','prim','primo-prim']
df = df.sort_values('poly').reset_index(drop=True)
sm = 100
plt.figure(figsize=(12,8))
for i, lab in enumerate(labels):
plt.subplot(4,1,i+1)
if(i != 3):
df = df.sort_values(lab).reset_index(drop=True)
y = df[lab].copy()
if(i != 3):
y = mms( y )**0.5
y = y.rolling(sm).mean().drop(np.arange(sm)).reset_index(drop=True)
y = pd.Series(y)
plt.plot(np.arange(len(y)),y, alpha=0.8)
plt.title(lab + ' sorted by self')
plt.ylabel(' ln(score)' )
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=1)
#%% Collect favorite sequences
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print('\n=====================================================\n')
labels = ['poly','primo','prim']
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def nucs2seq(row):
row = list(row)
t = ''.join(row)
return t
# Heatmap for favorite seqs vs all gtc containing seqs
df = pd.read_csv('./data/chip_B.csv')
df_favor = pd.read_csv('./data/favor_seqs.csv')
df['seq'] = list(map( nucs2seq, np.array(df.iloc[:,:-3]) ))
# keep favorite sequences (1000~6000 reps)
df_test = pd.read_csv('./data/validation.csv')
df.index = df['seq']
df = df.loc[df_favor['seq'],:]
df = df.dropna(axis=0).reset_index(drop=True)
df.columns = list(df.columns[:-4]) + ['poly', 'prim', 'primo', 'seq']
# keep non test set sequences
toDrop = df_test['seq']
df.index = df['seq']
df = df.drop(toDrop, axis=0, errors='ignore')
df = df.reset_index(drop=True)
print("let's unite the data by seq and watch the mean and std of each sequence")
dfm = pd.DataFrame()
dfm['primo'] = mms(df.groupby('seq').median()['primo'])
dfm['primo_std'] = mms(df.groupby('seq').std()['primo'])#/mms( df.groupby('seq').mean()['primo'] )
dfm['prim'] = mms(df.groupby('seq').median()['prim'])
dfm['prim_std'] = mms(df.groupby('seq').std()['prim'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['poly'] = mms(df.groupby('seq').median()['poly'])
dfm['poly_std'] = mms(df.groupby('seq').std()['poly'])#/mms( df.groupby('seq').mean()['poly'] )
dfm['seq'] = dfm.index
dfm = dfm.reset_index(drop=True)
T1 = np.percentile(dfm['primo'], 95)
T2 = np.percentile(dfm['primo_std'], 90)
T3 = np.percentile(dfm['prim'], 95)
T4 = np.percentile(dfm['prim_std'], 90)
T5 = np.percentile(dfm['poly'], 95)
T6 = np.percentile(dfm['poly_std'], 90)
print('length of dfm before outlier cleaning = {}'.format(len(dfm)) )
dfm = dfm.drop(np.where(dfm['primo'] > T1 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['primo_std'] > T2 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim'] > T3 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['prim_std'] > T4 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly'] > T5 )[0]).reset_index(drop=True)
dfm = dfm.drop(np.where(dfm['poly_std'] > T6 )[0]).reset_index(drop=True)
print('length of dfm after outlier cleaning = {}'.format(len(dfm)) )
nucs = np.array(list(map(list, dfm['seq']))).copy()
nucs = pd.DataFrame(nucs.copy())
nucs = nucs.add_suffix('_nuc')
nucs = nucs.reset_index(drop=True)
dfm = pd.concat([dfm, nucs], axis=1)
dfm = dfm.reset_index(drop=True)
toKeep = [x for x in dfm.columns if 'std' not in x]
dfm = dfm.loc[:,toKeep]
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab])
for lab in labels:
dfm.loc[:,lab] = mms(dfm.loc[:,lab]**0.5)
dfm.to_csv('data/chip_B_favor.csv', index=False)
#%% Heatmap of ABS Correlation
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def mms(t):
if(np.max(t) - np.min(t) > 0):
t = (t - np.min(t))/(np.max(t) - np.min(t))
else:
t = (t)/(np.max(t))
return t
def count_letters(df_nucs, rep_dict):
X = df_nucs.copy()
X = X.replace(rep_dict)
X = np.array(X)
X = np.sum(X,1)
return X
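# Hedged illustration of count_letters (toy row, not from the chip data): with
# rep_dict = {'A': 1, 'C': 0, 'G': 1, 'T': 0}, a row ['A', 'G', 'T'] maps to [1, 1, 0]
# and sums to 2, i.e. the number of purines in that row.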
df = pd.read_csv('data/chip_B_favor.csv')
cols = df.columns
cols = [x for x in cols if 'nuc' in x]
df_nucs = df.loc[:,cols].copy()
df_labels = df.loc[:,['primo','prim','poly']]
df_res = pd.DataFrame()
# count appearances of each individual letter
for letter in ['A','C','G','T']:
rep_dict = {'A':0,'C':0,'G':0,'T':0}
rep_dict[letter] = 1
df_res['{}_count'.format(letter) ] = count_letters(df_nucs, rep_dict)
gtc_ind_start = ''.join( list(df_nucs.iloc[0,:]) ).find('GTC') - 5
gtc_ind_end = gtc_ind_start + 5 + 3 + 5
# extract purine and pyrimidine densities
# A,G Purines
# C,T Pyrimidines
""" =================== Left Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Left_Pur_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Left_Pry_count'] = count_letters(df_nucs.iloc[:,:gtc_ind_start], rep_dict)
""" =================== Center / Determinant Count ===================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Center_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Center_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_start:gtc_ind_end], rep_dict)
""" =================== Right Side Count =============================== """
rep_dict = {'A':1,'C':0,'G':1,'T':0}
df_res['Right_Pur_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
rep_dict = {'A':0,'C':1,'G':0,'T':1}
df_res['Right_Pry_count'] = count_letters(df_nucs.iloc[:,gtc_ind_end:], rep_dict)
df_res = pd.concat([df_res, df_labels], axis=1)
plt.figure(figsize=(12,8))
df_corr = (df_res.corr().abs())
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
plt.figure(figsize=(12,8))
df_corr = df_corr.loc[['primo','prim','poly'],['primo','prim','poly']]
sns.heatmap(df_corr, cmap="bwr")
plt.title('Absolute Correlation')
plt.show()
#%% K mers spectrum
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import entropy
NMERS = [1,2,3]
df = pd.read_csv('./data/chip_B_favor.csv')
labels = ['primo','prim','poly']
np.random.RandomState(42)
df.index = df['seq']
m2 = 'CCACCCCAAAAAACCCCGTCAAAACCCCAAAAACCA'
df.loc[m2,'primo']
im = plt.imread(r'C:\Users\Ben\Desktop/Picture1.png')
x = list(range(1,14))
y = [1,
0,
0.4,
0.6,
0.47,
0.13,
0.2,
0.3,
0.5,
0.46,
0.5,
0.67,
0.8]
x= np.array(x)
y= np.array(y)
plt.imshow(im)
plt.scatter(x,y, c='red')
#for col in labels:
#df = df.drop(np.where(df[col] > np.percentile(df[col],95))[0],axis=0).reset_index(drop=True)
#df = df.drop(np.where(df[col] < np.percentile(df[col],5))[0],axis=0).reset_index(drop=True)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
plt.figure(figsize=(18,16))
for i, N in enumerate(NMERS):
letters = ['A','C','G','T']
    combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
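    # e.g. for N=2 this enumerates all 16 dinucleotides: 'AA', 'AC', ..., 'TT'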
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
df_mer = np.sum(df_mer)
df_mer = df_mer/np.sum(df_mer)
df_mer = df_mer[(df_mer >= 0.01 )]
plt.subplot(len(NMERS),1,i+1)
plt.scatter(np.arange(len(df_mer)), df_mer, color=(['blue','red','green'])[i] )
plt.xticks(np.arange(len(df_mer)), df_mer.index, rotation=90)
#plt.legend([' Variance: {}'.format( np.var(df_mer)) ])
plt.title('{}-Mer'.format(N) )
plt.ylim([0, 0.3])
plt.ylabel('mer frequency')
#%% K-MEANS and Hierarchical clustering
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
NLIST = [5]
labels = ['poly','prim','primo']
labels = ['primo']
ShowTextOnDendogram = True
showKM = True
showHC = False
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
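# Note on OHE: each of the 36 sequence positions is expanded into four indicator columns
# (position_nuc_A/C/G/T), giving a fixed 144-column binary matrix; any of those columns that
# get_dummies does not produce is added as zeros so every sequence shares the same layout.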
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
df_backup = df.copy()
# =============================================================================
# Hierarchical Clustering
# =============================================================================
from scipy.cluster import hierarchy
if(showHC):
#WORKS FINE
X = df_backup.drop(labels,axis=1).copy()
X = X.iloc[:,:].reset_index(drop=True)
Z = hierarchy.linkage(X, method='ward')
Z = pd.DataFrame(Z)
botline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])),-2] * 1.05
topline = Z.iloc[np.argmax(np.diff(Z.iloc[:,-2])) + 1, -2] * 0.95
fig = plt.figure(figsize=(4, 6))
dn = hierarchy.dendrogram(Z, p=7, truncate_mode='level', color_threshold=40, distance_sort=True)
plt.hlines([botline, topline], xmin=0, xmax=len(Z), ls='--', alpha = 0.9 )
plt.ylabel('Ward Distance')
disticks = np.unique(np.sqrt(Z.iloc[:,-2]).astype(int))
#plt.yticks( disticks**2 , disticks)
plt.xticks([])
plt.xlabel('')
Z = hierarchy.linkage(X, method='ward')
X[labels] = df_backup[labels].copy()
thr = 40
dists = [ 20, 40, 80, 120]
fntsze = 22
thr = 40
for i, thr in enumerate(dists):
Xg = X.copy()
Xg['bin'] = hierarchy.fcluster(Z, thr, criterion='distance', depth=5, R=None, monocrit=None)
Xres = Xg.groupby('bin').sum()
Xres[labels] = Xg.groupby('bin').median()[labels]
xcount = Xg.copy()
xcount['count'] = 1
xcount = xcount.groupby('bin').sum()['count']
xcnew = [xcount.iloc[0]/2]
for j in xcount.index[1:]:
xcnew += [np.sum(xcount[:j-1]) + xcount[j]/2]
xcount = pd.Series( xcnew )
xcount.index = xcount.index + 1
#plt.subplot(4,1, i+1 )
#plt.scatter(Xres.index, Xres[labels])
toKeep = [x for x in X.drop(labels, axis=1).columns if '36' not in x]
Xres = (Xres.loc[:,toKeep])
Xres.columns = [x[-1] for x in Xres.columns]
Xres = Xres.T
Xres = Xres.groupby(Xres.index).sum()
for col in Xres.columns:
Xres[col] = Xres[col] / np.sum(Xres[col])
Xres = Xres.T
row_idx = 1
for row_idx in Xres.index:
row = Xres.loc[row_idx,:]
print(
xcount.iloc[row_idx-1]
)
accumsize = 0
for dx, lett in enumerate(row.index):
x_rng = plt.gca().get_xlim()[1]
# =============================================================================
                # # ADDING TEXT TO DENDROGRAM
# =============================================================================
if(ShowTextOnDendogram == True):
plt.text(x= xcount.iloc[row_idx-1]*x_rng/len(Xg) + accumsize,
y=thr, horizontalalignment='left',
s=lett, fontsize=np.max([fntsze*row[lett], 6]) ,
weight='normal', fontname='arial')
accumsize += np.max([fntsze*row[lett], 8]) + 36
#% TODO MAKE THIS PRETTY
from sklearn.metrics import silhouette_score
res_ss = []
xvec = [5]
for i in xvec:
X = df.copy().drop(['bin'], axis=1, errors='ignore')
X = X.drop(labels, axis=1)
tmp_ss = []
for j in range(1):
km = KMeans(i, random_state=j )
y = km.fit_predict(X)
ss = silhouette_score( X, y )
tmp_ss += [ss]
print('sil score => mean: {} | std: {}'.format(np.mean(tmp_ss), np.std(tmp_ss)) )
res_ss += [np.mean(tmp_ss)]
plt.figure()
plt.scatter(xvec,res_ss)
plt.xlabel('K-Value')
plt.ylabel('Sil Score')
plt.show()
if(showKM):
col = 'primo'
plt.figure(figsize=(6,4))
for i, Nbins in enumerate(NLIST):
df = df_backup.copy()
km = KMeans(Nbins, random_state=42 )
df['bin'] = km.fit_predict(df.drop(labels,axis=1))
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4,
4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
tdf = df.loc[:,['bin',col]]
#rep_d = {0:'A',1:'B',2:'C',3:'D',4:'E'}
rep_d = {0:2,1:3,2:0,3:1,4:4}
df['bin'] = df['bin'].replace(rep_d)
centers = list(np.array(centers)[list(rep_d.values())])
print('Mean Words:')
print(centers)
#rep_d = {'A':2,'B':3,'C':0,'D':1,'E':4}
#df['bin'] = df['bin'].replace(rep_d)
plt.subplot(len(NLIST),1,i+1)
sns.violinplot(x="bin", y=col, data=df, palette="Blues", cut=0)
plt.ylim([-0.2, 1.2])
plt.ylabel('Primase \nBinding Scores', fontsize=12)
plt.title('Scores Distribution by Cluster', fontsize=12)
"""
for tx, tcent in zip(np.arange(np.max(tdf['bin'])+1) , centers):
chunks, chunk_size = len(tcent), len(tcent)//6
stlist = [ tcent[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
tcent = '\n'.join(stlist)
t = plt.text(x=tx-0.5, y=0, s=tcent, fontsize=10, color='red', fontweight='normal', backgroundcolor='white')
t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='white'))
plt.xlim([-1, Nbins-1 + 0.5])
"""
#plt.xticks( np.arange(np.max(tdf['bin'])+1)
#,centers , rotation=-90, fontsize=12)
plt.yticks( [0,0.25,0.5,0.75,1], fontsize=12 )
plt.tight_layout()
plt.savefig('./out/kmeans/forpaper_B_centroids_' + str(Nbins) + 'bins')
plt.show()
#plt.close()
#%% PCA
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from itertools import product
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
NMERS = [3]
df = pd.read_csv('./data/chip_B_favor.csv')
#labels = ['primo','prim','poly']
labels = ['primo']
np.random.RandomState(42)
def mms(t):
t = (t - np.min(t))/(np.max(t) - np.min(t))
return t
"""
for col in labels:
df[col] = mms(df[col])
df[col] = np.round(df[col]*2)
df[col] = df[col].replace({0:'0weak',1:'1medium',2:'2strong'})
"""
for N in NMERS:
letters = ['A','C','G','T']
    combs_list = list(product(letters, repeat=N))
combs_list = list(map(''.join,combs_list))
df_mer = pd.DataFrame(np.zeros([len(df), len(combs_list)]))
df_mer.columns = combs_list
mers = df['seq'].apply(lambda seq: [ seq[i:i+N] for i in range(2, len(seq)-1, 1) ])
mers = (np.array(list(mers)).reshape([len(mers),len(mers[0])]))
mers = pd.DataFrame(mers)
    # count mers
for comb in combs_list:
comb_sum = np.sum(mers == comb,axis=1)
df_mer.loc[:,comb] = comb_sum
pca = PCA(n_components=np.min([16,len(df_mer.columns)]), svd_solver='auto', random_state=42)
df_mer = pd.DataFrame(pca.fit_transform(df_mer.dropna(axis=1)))
df_mer = df_mer.add_prefix('pc')
#MMS -1 1
for col in df_mer.columns:
df_mer[col] = mms(df_mer[col])
for col in labels:
df_mer[col] = df[col]
np.cumsum(pca.explained_variance_ratio_)
1/0
# 3D scatter
for lab in labels:
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111, projection='3d')
x = df_mer['pc0']
y = df_mer['pc1']
z = df_mer['pc2']
clrs = mms( (df_mer[lab]) )
ax.scatter3D(2*x + 0.05*np.random.randn(len(x)) ,
2*y + 0.05*np.random.randn(len(y)) ,
2*z + 0.05*np.random.randn(len(z)) ,
alpha=0.6, c=clrs, cmap='bwr')
plt.xlabel('pc0')
plt.ylabel('pc1')
ax.set_zlabel('pc2')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 3D Projection """
#plt.close()
fig = plt.figure(figsize=(14,10))
x = df_mer['pc0']
y = df_mer['pc1']
plt.scatter( x-0.5, #+ 0.05*np.random.randn(len(x)) ,
y-0.5, #+ 0.05*np.random.randn(len(y)) ,
alpha=0.6, c=clrs, cmap='bwr' )
plt.xlabel('pc0')
plt.ylabel('pc1')
plt.title('{}: {}-mer projection'.format(lab,N) )
plt.savefig('./out/pca/{}_{}mer'.format(lab,N) )
plt.show()
""" PUT A COMMENT TO SEE 2D Projection """
#plt.close()
#%% Dynamic clustering and prediction
"""
This technique involves all of our research:
by using PCA we learn the existence of 5 clusters,
by using k-means we classify each sequence to its cluster,
by using regressors such as lasso we train a model for each cluster
and predict labels with high resolution.
We can compare results with or without dynamic clustering.
"""
"""
Dendrogram
Plot By TSNE
"""
import os
os.chdir(r'C:\Users\Ben\Desktop\T7_primase_Recognition_Adam\adam\paper\code_after_meating_with_danny')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
import pickle
from time import perf_counter, sleep
[plt.close() for x in plt.get_fignums()]
N = 3
with_clustering = True
stime = perf_counter()
#labels = ['poly','prim','primo']
labels = ['primo']
def OHE(df):
cols = []
for i in range(36):
for letter in ['A','C','G','T']:
cols += [ str(i+1) + '_nuc_' + letter]
tdf = pd.get_dummies(df)
toAdd = np.setdiff1d(cols, tdf.columns)
for col in toAdd:
tdf[col] = 0
for col in cols:
tdf[col] = tdf[col].astype(int)
tdf = tdf.loc[:,cols]
return tdf
df = pd.read_csv('./data/chip_B_favor.csv')
df = pd.concat([OHE(df.drop(labels,axis=1)), df.loc[:,labels]], axis=1)
# apply KMEANS
km = KMeans(5, random_state=42, n_init=20 )
bins_pred = km.fit_predict(df.drop(labels,axis=1))
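# A minimal sketch of the per-cluster regression idea from the cell docstring above, assuming
# sklearn's Lasso (illustrative only; not the exact downstream models):
#   for b in range(5):
#       mask = (bins_pred == b)
#       Lasso(alpha=0.01).fit(df.drop(labels, axis=1)[mask], df[labels[0]][mask])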
pickle.dump(km, open('./out/regressors/models/km.sav' , 'wb') )
t = km.cluster_centers_
cc = np.array(km.cluster_centers_).reshape(km.cluster_centers_.shape[0],
km.cluster_centers_.shape[1]//4, 4)
cc = np.array(pd.DataFrame(np.argmax(cc,axis=2)).replace({0:'A',1:'C',2:'G',3:'T'}))
centers = [''.join(l) for l in cc]
df =
|
pd.read_csv('./data/chip_B_favor.csv')
|
pandas.read_csv
|
import pyprind
import os
import tarfile
import pandas as pd
import numpy as np
with tarfile.open('aclImdb_v1.tar.gz', 'r:gz') as tar:
tar.extractall()
basepath = 'aclImdb'
labels = {'pos': 1, 'neg': 0}
pbar = pyprind.ProgBar(50000)
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from vc.definitions import ROOT_DIR
####################################################################
# Common variables.
####################################################################
# Get the root dir of the module.
# Folder path to data files.
folder_path = ROOT_DIR + "/datasets/temp_brazil_cities/raw_data/"
# Set the year you want to look at.
year = 1977
####################################################################
# Load and clean data for each city individually.
####################################################################
# Load data into Pandas DataFrame with first row as column names and first column as index names.
belem_df = pd.read_csv(folder_path + "station_belem.csv", header=0, index_col=0)
# Remove pre-generated average columns.
belem_df = belem_df.drop(["D-J-F", "M-A-M", "J-J-A", "S-O-N", "metANN"], axis=1)
# Set erroneous values to NaN so they don't disturb the results.
belem_df[belem_df > 100] = np.nan
curitiba_df =
|
pd.read_csv(folder_path + "station_curitiba.csv", header=0, index_col=0)
|
pandas.read_csv
|
from sklearn.model_selection import train_test_split
import argparse
import jsonlines
import json
import pandas as pd
from collections import Counter
import random
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='climate-fever-dataset-r1.jsonl')
parser.add_argument('--pool_size', type=str, default='small')
parser.add_argument('--output_path', type=str, default= '.')
args = parser.parse_args()
def flatten(dataset, unique=True):
claims = []
evidences=[]
labels=[]
idx=[]
i=0
if unique:
for data in dataset:
claims.append(data['claim'].strip('\"'))
evidences.append(data['evidences'][0]['evidence'].strip('\"'))
labels.append(data['evidences'][0]['evidence_label'])
idx.append(i)
i+=1
else:
for data in dataset:
for evidence in data['evidences']:
claims.append(data['claim'].strip('\"'))
evidences.append(evidence['evidence'].strip('\"'))
labels.append(evidence['evidence_label'])
idx.append(i)
i+=1
flattened_dataset = {'claim':claims, 'evidence':evidences, 'label':labels, 'id':idx}
print(max(([len(e.split(' ')) for e in evidences])))
c=Counter(labels)
min_count = min(c.values())
print(c)
return pd.DataFrame.from_dict(flattened_dataset), min_count
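# Each flattened row pairs one claim with one evidence sentence, its label
# (SUPPORTS / REFUTES / NOT_ENOUGH_INFO) and a running integer id; min_count is the size
# of the rarest label class and is used below to balance the pool.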
def small_pool():
#read
dataset = []
with open(args.dataset, encoding='utf-8') as f:
for line in f:
dataset.append(json.loads(line))
# print(dataset[0], type(dataset))
dataset, n=flatten(dataset)
#
# #balance
S = dataset[dataset.label == "SUPPORTS"].sample(n, random_state=42)
N = dataset[dataset.label == "NOT_ENOUGH_INFO"].sample(n, random_state=42)
C = dataset[dataset.label == "REFUTES"].sample(n, random_state=42)
print(len(S), len(N), len(C))
# #split
df_train = pd.DataFrame()
df_val = pd.DataFrame()
df_test = pd.DataFrame()
for set in [S, N, C]:
train, test = train_test_split(set, test_size=0.33, random_state=42)
val, test = train_test_split(test, test_size=0.5, random_state=42)
df_train = pd.concat([df_train, train])
df_val =
|
pd.concat([df_val, val])
|
pandas.concat
|
import requests
import os
from datetime import date, datetime, timedelta as td
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
import random
############ Data Munging ############
def time_dataframe_prep(df, start_date, end_date, start_date_column, end_date_column, category_column):
"""
Returns an exploded dataframe, with every minute labeled with the event name or 'no entry'.
Parameters
----------
df : dataframe
        A dataframe that contains tagged timestamps
start_date : str
Date of first entry
end_date :str
Date of last entry
start_date_column : datetime
Column that contains when the event started
end_date_column : datetime
Column that contains when the event ended
category_column : str
Column that contains the event tag name
Returns
-------
df_minutes_se : dataframe
Table with every minute tagged
"""
########################
## Step 1: Create a dataframe of just the end dates
########################
df_end = df[[end_date_column]].copy()
# Add a column for 'no entry'
df_end[category_column] = 'no entry'
# If there is no gap in data (as in there is an entry immediately following the previous),
# remove the record from the df_end dataframe
start_date_pt_list = list(df[start_date_column].unique())
df_end = df_end[~df_end[end_date_column].isin(start_date_pt_list)]
########################
## Step 2: Combine End and Start Dates into single dataframe
########################
# Create a two column data frame with the start date and the category
df_start = df[[start_date_column, category_column]].copy()
# Update column names to match that of df_start
df_end.rename(columns = {end_date_column: start_date_column}, inplace = True)
# Append the df_end dataframe to the bottom
df_entries = pd.concat([df_start, df_end])
########################
## Step 3: Expand Dataset - Every Second
########################
# Create a dataframe of second intevals between two dates
time_range = pd.date_range(start_date, end_date, freq= '1s')
time_range_df = pd.DataFrame(time_range).rename(columns = {0: 'date_time'})
# Convert to time
time_range_df['date_time'] =
|
pd.to_datetime(time_range_df['date_time'])
|
pandas.to_datetime
|
"""
Individual stock data
"""
from cnswd.setting.config import POLL_FREQUENCY, TIMEOUT
from cnswd._seleniumwire import make_headless_browser
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from retry import retry
import logbook
import time
import sys
import random
import pandas as pd
logbook.StreamHandler(sys.stdout).push_application()
logbook.set_datetime_format("local")
logger = logbook.Logger('同花顺')
url_fmt = "http://stockpage.10jqka.com.cn/{code}/{item}/"
class THSStock(object):
    """同花顺 individual-stock information API"""
_loaded = {}
    # Subclasses define the table attribute to read
item = ''
def __init__(self):
self.logger = logger
        self.logger.info("Creating headless browser")
self.driver = make_headless_browser()
self.wait = WebDriverWait(self.driver, 60, POLL_FREQUENCY)
def __enter__(self):
return self
def __exit__(self, *args):
self.driver.delete_all_cookies()
self.driver.quit()
    def _load_page(self, code):
        """Load the web page"""
if self._loaded.get((code, self.item), False):
return
current_url = url_fmt.format(code=code, item=self.item)
        self.logger.info(f"Loading: {current_url}")
self.driver.get(current_url)
for r in self.driver.requests:
print(r.path)
request = self.driver.wait_for_request(current_url)
if request.response.status_code != 200:
raise ValueError(request.response.reason)
self._loaded[(code, self.item)] = True
    def _parse_href_in_table(self, step=2):
        """Parse the URLs of the anchor elements inside the table"""
        # The table may contain several links, so pick them at a fixed interval
# 2 :√×√×
# 3 :√××√××
css = 'td a[href]'
ls = self.driver.find_elements_by_css_selector(css)
res = {}
for e in ls[::step]:
res[e.text] = e.get_attribute('href')
return res
    def read_page_table(self):
        """Read the table data on the page for the specified attributes"""
attrs = self.table_attrs
return pd.read_html(self.driver.page_source, attrs=attrs)[0]
    def read_pages_table(self):
        """Read table data across multiple consecutive pages"""
dfs = []
page_num = self.get_page_num()
first_df = self.read_page_table()
if page_num == 1:
dfs = [first_df]
else:
for p in range(2, page_num+1):
self._change_page_to(p)
path = f'page/{p}'
self._wait_path_loading(path)
df = self.read_page_table()
dfs.append(df)
dfs.insert(0, first_df)
        # Because simple path matching is used for locating requests, be sure to delete
        # all requests after reading to avoid mislocating them later
del self.driver.requests
return
|
pd.concat(dfs)
|
pandas.concat
|
import string
import numpy as np
import pandas as pd
import scipy.stats as ss
import biclust_comp.analysis.accuracy_utils as acc_utils
import biclust_comp.analysis.plots as plots
def format_fraction_failing(row):
expected_runs = row['size']
successful_runs = row['count']
prefix = ""
suffix = ""
#if expected_runs != successful_runs:
# prefix += '\textit{'
# suffix += '}'
if successful_runs == 0 and expected_runs > 0:
        prefix += r'\textbf{'
suffix += '}'
return f"{prefix}{expected_runs - successful_runs} / {expected_runs}{suffix}"
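# Hedged examples of the formatting above (toy rows, not from the results tables):
#   {'size': 10, 'count': 8}  ->  '2 / 10'
#   {'size': 5,  'count': 0}  ->  '\textbf{5 / 5}'   (every expected run failed, so bold)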
def add_midrules_latex(latex, indices, line_to_add=r'\midrule'):
lines = latex.splitlines()
# We have to insert lines from largest index to smallest so we don't mess up indexing
for index in sorted(indices, reverse=True):
lines.insert(index, line_to_add)
return '\n'.join(lines)
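# Hedged usage sketch (hypothetical indices): add_midrules_latex(latex, [3, 7]) inserts a
# '\midrule' line before what were originally lines 3 and 7 of the rendered table; indices
# are processed from largest to smallest so earlier insertions don't shift later ones.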
def output_latex_table_failure_counts(input_df,
output_txt,
name=None):
error_df =
|
pd.read_csv(input_df, index_col=None)
|
pandas.read_csv
|
import pymzml
from tqdm import tqdm
import numpy as np
from scipy.integrate import simps
import pandas as pd
import peakutils
import glob
from pathlib import Path
import scipy
import pickle
import os
import re
import pyisopach
from scipy import special
import itertools
import urllib
import json
# Modeling modules
# from tensorflow import keras
# 20210922 note: deal with feature extraction accuracy with multiple peaks:
# ms_chromatogram_list, mz_gen, peak_pick need to go through
# mss-mzml_test folder, 1Dexposure1_1.mzML to test 299.1765 10ppm
# *reading external data
this_dir, this_filename = os.path.split(__file__)
Model_file_t = os.path.join(this_dir, 'rfmodel_tuned.pkl')
# Switch pickle? ONNX?
rf_model_t = pickle.load(open(Model_file_t, 'rb'))
Pmodel = rf_model_t
# Read in formula database **
Formula_file = os.path.join(this_dir, '100-500.csv')
cfg =
|
pd.read_csv(Formula_file, index_col=0)
|
pandas.read_csv
|
"""
Author: <NAME>
Create on: 2021-10-23
This module combines data from different sources into one dataframe.
"""
import pandas as pd
import numpy as np
import trading_strategy.data_crawler as crawler
from functools import reduce
from datetime import date
pd.set_option('display.max_columns', None)  # show all columns
pd.set_option('display.max_rows', None)  # show all rows
def get_pandemic_data():
# Covid 19 data
us_case_df = pd.read_csv('../trading_strategy_data/us_covid_19_data/us_daily_case_trends.csv')
us_case_df = us_case_df.drop(columns=['State'])
us_death_df = pd.read_csv('../trading_strategy_data/us_covid_19_data/us_daily_death_trends.csv')
us_death_df = us_death_df.drop(columns=['State'])
# Change date format
us_case_df['Date'] =
|
pd.to_datetime(us_case_df.Date)
|
pandas.to_datetime
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.iolib.summary2 import _df_to_simpletable
from statsmodels.stats.outliers_influence import variance_inflation_factor
from brightics.common.repr import BrtcReprBuilder
from brightics.common.repr import strip_margin
from brightics.common.repr import plt2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils.table_converters import simple_tables2df_list
from brightics.function.utils import _model_dict
from brightics.function.extraction import one_hot_encoder
from brightics.common.validation import validate
from brightics.common.validation import greater_than_or_equal_to
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.classify_input_type import check_col_type
def linear_regression_train(table, group_by=None, **params):
params = get_default_from_parameters_if_required(params, _linear_regression_train)
param_validation_check = [greater_than_or_equal_to(params, 1, 'vif_threshold')]
validate(*param_validation_check)
check_required_parameters(_linear_regression_train, params, ['table'])
if group_by is not None:
grouped_model = _function_by_group(_linear_regression_train, table, group_by=group_by, **params)
return grouped_model
else:
return _linear_regression_train(table, **params)
def _linear_regression_train(table, feature_cols, label_col, fit_intercept=True, is_vif=False, vif_threshold=10):
feature_names, features = check_col_type(table, feature_cols)
label = table[label_col]
    if fit_intercept:
        features = sm.add_constant(features, has_constant='add')
    lr_model_fit = sm.OLS(label, features).fit()
predict = lr_model_fit.predict(features)
residual = label - predict
summary = lr_model_fit.summary()
summary_tables = simple_tables2df_list(summary.tables, drop_index=True)
summary0 = summary_tables[0]
summary1 = summary_tables[1]
if type(features) != type(table):
features = pd.DataFrame(features)
if is_vif:
summary1['VIF'] = [variance_inflation_factor(features.values, i) for i in range(features.shape[1])]
summary1['VIF>{}'.format(vif_threshold)] = summary1['VIF'].apply(lambda _: 'true' if _ > vif_threshold else 'false')
summary.tables[1] = _df_to_simpletable(summary1)
summary2 = summary_tables[2]
html_result = summary.as_html()
plt.figure()
plt.scatter(predict, label)
plt.xlabel('Predicted values for ' + label_col)
plt.ylabel('Actual values for ' + label_col)
x = predict
p1x = np.min(x)
p2x = np.max(x)
plt.plot([p1x, p2x], [p1x, p2x], 'r--')
fig_actual_predict = plt2MD(plt)
plt.figure()
plt.scatter(predict, residual)
plt.xlabel('Predicted values for ' + label_col)
plt.ylabel('Residuals')
plt.axhline(y=0, color='r', linestyle='--')
fig_residual_1 = plt2MD(plt)
plt.figure()
sm.qqplot(residual, line='s')
plt.ylabel('Residuals')
fig_residual_2 = plt2MD(plt)
plt.figure()
sns.distplot(residual)
plt.xlabel('Residuals')
fig_residual_3 = plt2MD(plt)
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
| ## Linear Regression Result
| ### Summary
|
"""))
rb.addHTML(html_result)
rb.addMD(strip_margin("""
|
| ### Predicted vs Actual
| {image1}
|
| ### Fit Diagnostics
| {image2}
| {image3}
| {image4}
""".format(image1=fig_actual_predict,
image2=fig_residual_1,
image3=fig_residual_2,
image4=fig_residual_3
)))
model = _model_dict('linear_regression_model')
model['features'] = feature_cols
model['label'] = label_col
model['coefficients'] = lr_model_fit.params.values
model['fit_intercept'] = fit_intercept
model['r2'] = lr_model_fit.rsquared
model['adjusted_r2'] = lr_model_fit.rsquared_adj
model['aic'] = lr_model_fit.aic
model['bic'] = lr_model_fit.bic
model['f_static'] = lr_model_fit.fvalue
model['tvalues'] = lr_model_fit.tvalues.values
model['pvalues'] = lr_model_fit.pvalues.values
model['_repr_brtc_'] = rb.get()
model['summary0'] = summary0
model['summary1'] = summary1
model['summary2'] = summary2
lr_model_fit.remove_data()
model['lr_model'] = lr_model_fit
return {'model' : model}
def linear_regression_predict(table, model, **params):
check_required_parameters(_linear_regression_predict, params, ['table', 'model'])
if '_grouped_data' in model:
return _function_by_group(_linear_regression_predict, table, model, **params)
else:
return _linear_regression_predict(table, model, **params)
def _linear_regression_predict(table, model, prediction_col='prediction'):
result=table.copy()
if 'features' in model:
feature_cols = model['features']
else:
feature_cols = model['feature_cols']
if 'lr_model' in model:
feature_names, features = check_col_type(table, feature_cols)
features =
|
pd.DataFrame(features, columns=feature_names)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
from datetime import datetime, date
import re, glob, math
from openpyxl import load_workbook # writing to Excel
from PIL import Image, ImageDraw, ImageFont
import tkinter as tk
import pkg.SC_config as cnf # _OUTPUT_DIR and _INPUT_DIR
#%%
def combinephrases(mylist):
''' Combine list of phrases using commas & and '''
if len(mylist)==1:
return str(mylist[0])
elif len(mylist)==2:
tempstr=str(mylist[0])+ ' and ' +str(mylist[1])
return tempstr
else:
rest=mylist[:-1]
rest=[str(i) for i in rest]
last=mylist[-1]
tempstr=', '.join(rest) +' and ' + str(last)
        return tempstr
#%%
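# Hedged examples for combinephrases above (illustrative lists only):
#   combinephrases(['3rd girls'])                     -> '3rd girls'
#   combinephrases(['3rd girls', '4th boys'])         -> '3rd girls and 4th boys'
#   combinephrases(['3rd girls', '4th boys', '5th'])  -> '3rd girls, 4th boys and 5th'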
def writetoxls(df, sheetname, xlsfile):
''' Generic write of given df to specified tab of given xls file '''
book=load_workbook(xlsfile)
writer=pd.ExcelWriter(xlsfile, engine='openpyxl', datetime_format='mm/dd/yy', date_format='mm/dd/yy')
writer.book=book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df.to_excel(writer,sheet_name=sheetname,index=False) # this overwrites existing file
writer.save() # saves xls file with all modified data
return
def loadtransfers(df, signups):
''' Load transferred players and add to signups (then run player ID);
transfers added as normal players but need fake billing entries
'''
df=df.rename(columns={'Fname':'First','Lname':'Last','Street':'Address','Parish':'Parish of Registration'})
df=df.rename(columns={'Phone':'Phone1','Birthdate':'DOB','Sex':'Gender','Open/Closed':'Ocstatus'})
# Replace Girl, Boy with m f
df.loc[:,'Gender']=df.Gender.replace('F','Girl')
df.loc[:,'Gender']=df.Gender.replace('M','Boy')
# Manually enter sport
print('Enter sport for transferred players')
sport=input()
df.loc[:,'Sport']=sport
df=df.dropna(subset=['First']) # remove blank rows if present
mycols=[col for col in df if col in signups]
df=df[mycols]
df=formatnamesnumbers(df)
# place date/transfer in timestamp
mystamp=datetime.strftime(datetime.now(),'%m/%d/%y')+' transfer'
df.loc[:,'Timestamp']=mystamp
mycols=signups.columns
signups=signups.append(df, ignore_index=True)
signups=signups[mycols]
return signups
def packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile):
''' Package roster and contact info by sport- school and save as separate xls files
also generate customized e-mails in single log file (for cut and paste send to appropriate persons)
args:
teams - loaded team list
mastersignups - signups w/ team assignment
players -player DB
famcontact - family contact db
season - Fall, Winter or Spring
year - starting sports year (i.e. 2019 for 2019-20 school year)
acronyms - school/parish specific abbreviations
messfile - e-mail message template w/ blanks
returns:
'''
teams=teams[pd.notnull(teams['Team'])]
transferteams=np.ndarray.tolist(teams[teams['Team'].str.contains('#')].Team.unique())
transSU=Mastersignups[Mastersignups['Team'].isin(transferteams)]
# ensure that these are from correct season/year
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
transSU=transSU.loc[(transSU['Sport'].isin(sportlist)) & (transSU['Year']==year)] # season is not in mastersignups... only individual sports
# get family contact info from famcontacts
transSU=pd.merge(transSU, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
# get school from players.csv
transSU=pd.merge(transSU, players, how='left', on=['Plakey'], suffixes=('','_r2'))
# get division from Teams xls (for roster)
    transSU=pd.merge(transSU, teams, how='left', on=['Team'], suffixes=('','_r3')) # effectively adds other team info for roster to all players
transSU.loc[:,'Role']='Player' # add column for role
# transSU['Open/Closed']='Closed'
# Sort by grade pre-split
transSU.loc[:,'Grade']=transSU.Grade.replace('K',0)
transSU.loc[:,'Grade']=transSU.Grade.apply(int)
transSU=transSU.sort_values(['Grade'], ascending=True)
    transSU.loc[:,'Grade']=transSU.Grade.replace(0,'K') # restore K (temporarily set to 0 to allow sorting)
# Column for sorting by transferred to school
transSU.loc[:,'Transchool']=transSU['Team'].str.split('#').str[0]
grouped=transSU.groupby(['Sport','Transchool'])
for [sport, school], group in grouped:
# prepare roster tab
xlsname=cnf._OUTPUT_DIR+'\\Cabrini_to_'+school+'_'+sport+'_'+str(year)+'.xlsx'
writer=pd.ExcelWriter(xlsname, engine='openpyxl')
Transferroster=organizeroster(group)
Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
Transferroster=replaceacro(Transferroster,acronyms)
Transferroster.to_excel(writer,sheet_name='roster',index=False)
# prep contacts tab
mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
'Email2', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Team']
Transfercontacts=group[mycols]
Transfercontacts.to_excel(writer, sheet_name='contacts', index=False)
writer.save()
# Now generate list of e-mails for all schools/directors
logfile='transfers_director_emails_log.txt'
with open(logfile,'w+') as emaillog:
# Read generic file to sport director
with open(messfile, 'r') as file:
blankmessage=file.read()
for [sport, school], group in grouped:
plagroup=group.groupby(['Grade', 'Gender'])
platypes=[] # list of # of players by grade, gender
gradedict={'K':'K', 1:'1st', 2:'2nd',3:'3rd',4:'4th',5:'5th',6:'6th', 7:'7th',8:'8th'}
genderdict={'f':'girls', 'F':'girls','m':'boys','M':'boys'}
for [grade, gender], group in plagroup:
numplays=str(int(group['Grade'].count()))
grname=gradedict.get(grade)
genname=genderdict.get(gender)
platypes.append(numplays+' '+grname+' '+genname)
plalist=combinephrases(platypes)
thismess=blankmessage.replace('$SCHOOL', school)
thismess=thismess.replace('$SPORT', sport)
thismess=thismess.replace('$PLALIST', plalist)
emaillog.write(thismess)
emaillog.write('\n\n')
return
def findcards():
'''Search ID cards folder and return player # and file link
cards resized to 450x290 pix jpg in photoshop (scripts-image processor)
keys are either player number as string or coach CYC ID, vals are links to files'''
cardlist=glob.glob('%s\\IDcards\\*.jpg' %cnf._OUTPUT_DIR, recursive=True)
# construct list of [card #, filename]
nums=[i.split('\\')[-1] for i in cardlist]
nums=[i.split('_')[0] if '_' in i else i.split('--')[0] for i in nums ]
cards={} # dict for card numbers/filenames
for i,num in enumerate(nums):
cards.update({num: cardlist[i]})
return cards
def makethiscard(IDlist, team):
    ''' Passes link to ID card or player name (if missing). From the team's list of player numbers (in alphabetical order), find/open card links and create a single image'''
# make the master image and determine image array size
margin=10 # pix on all sides
if len(IDlist)<11: # use 2 x 5 array (horiz)
wide=2
high=5
elif len(IDlist)<13: # 4w x 3 h (vert)
wide=4
high=3
elif len(IDlist)<22: # 3x by 5-7 high (horiz); max 21
wide=3
high=math.ceil(len(IDlist)/3)
else: # more than 21 ... yikes
wide=3
high=math.ceil(len(IDlist)/3)
cardimage = Image.new('RGB', (450*wide+2*margin, 300*high+2*margin), "white") # blank image of correct size
draw=ImageDraw.Draw(cardimage) # single draw obj for adding missing card names
ttfont=ImageFont.truetype('arial.ttf', size=36)
for i,fname in enumerate(IDlist):
        row=i//high # integer division gives the horizontal position
        col=i%high # remainder gives the vertical position
xpos=margin+row*450
ypos=margin+col*300
try:
thiscard=Image.open(fname)
thiscard=thiscard.resize((450, 300), Image.ANTIALIAS)
cardimage.paste(im=thiscard, box=(xpos, ypos)) # paste w/ xpos,ypos as upper left
except: # occurs when "first last" present instead of file name/path
# blankcard=Image.new('RGB', (450, 300)) # make blank image as placeholder
draw.text((xpos+50,ypos+100),fname,font=ttfont, fill="red")
return cardimage
''' TESTING
i=0 team=teamlist[i]
'''
def makeCYCcards(df, players, teams, coaches, season, year, **kwargs):
''' From mastersignups and teams, output contact lists for all teams/all sports separately
team assignments must be finished
args:
df -- mastersignups dataframe
players - player info dataframe
teams - this year's teams csv
coaches - full coach CYC info list
season - Fall, Winter or Spring
kwargs:
showmissing - True (shows missing player's name); False- skip missing player
otherSchools - default False (also make card sheets for transferred teams/players)
kwargs={'showmissing':False}
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':False} )
'''
# Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
cards=findcards() # dictionary with number: filename combo for existing CYC cards
df=df[(df['Year']==year)]
df=df.reset_index(drop=True)
sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season)
df=df[df['Sport'].isin(sportlist)] # season is not in mastersignups... only individual sports
# Make list of teams that need cards (all track and others >1st grade)
def processGrade(val):
if val=='K':
return 0
else:
return int(val)
teams.loc[:,'Grade'] = teams['Grade'].apply(lambda x:processGrade(x))
if not kwargs.get('otherSchools', False):
# all transfer teams with contain # (e.g. SMOS#3G) so remove these
# dropped by default
teams = teams[~teams['Team'].str.contains('#')]
# need track teams or any team from grades 2+
cardTeamList= teams[ (teams['Grade']>1) | (teams['Sport']=='Track') ]['Team'].unique()
df=df[ df['Team'].isin(cardTeamList) ]
df=df.sort_values(['Last'])
# plakeys as string will be easiest for below matching
df.loc[:,'Plakey']=df['Plakey'].astype(int)
df.loc[:,'Plakey']=df['Plakey'].astype(str)
def getName(gr, pk):
# get name from plakey as string
match=gr[gr['Plakey']==pk]
name=match.iloc[0]['First'] + ' ' + match.iloc[0]['Last']
return name
teamgrouped = df.groupby(['Team'])
missinglist=[] # list of plakeys with missing card
for team, gr in teamgrouped:
# keys in card dict are strings
IDlist = [str(int(i)) for i in gr.Plakey.unique()]
missinglist.extend([i for i in gr.Plakey.unique() if i not in cards.keys() ])
if not kwargs.get('showmissing', False):
# Shows only valid cards, drops missing names
IDlist = [ cards.get(i) for i in IDlist if i in cards.keys() ]
filename='Cards_'+ team +'.jpg'
else: # show cards and missing name when card image not in IDcards folder
IDlist = [cards.get(i) if i in cards.keys() else getName(gr, i) for i in IDlist ]
filename='Cards_'+ team +'_all.jpg'
# get team's coaches
IDlist.extend(getcoachIDs(team, teams, coaches, cards)) # add coach ID image file or first/last if missing
cardimage =makethiscard(IDlist, team) # directly saved
# save the card file
cardimage.save(cnf._OUTPUT_DIR+'\\'+filename)
missingcards=players[players['Plakey'].isin(missinglist)]
missingcards=missingcards.sort_values(['Grade','Last'])
return missingcards
def getcoachIDs(team, teams, coaches, cards):
''' Returns CYC IDs for all team's coaches '''
thisteam=teams[teams['Team']==team]
IDlist=[]
if len(thisteam)!=1:
print(team, 'not found in current teams list')
return IDlist # blank list
thisteam=thisteam.dropna(subset=['Coach ID'])
if len(thisteam)!=1:
print('Coach ID not found for', team)
return IDlist # blank list
if thisteam.iloc[0]['Coach ID']!='': # possibly blank
thisID=thisteam.iloc[0]['Coach ID'].strip()
if thisID in cards:
IDlist.append(cards.get(thisID,'')) # file path to this coach's ID
else: # get first/last
thiscoach=coaches[coaches['Coach ID']==thisID]
if len(thiscoach)==1:
IDlist.append(thiscoach.iloc[0]['Fname']+' '+thiscoach.iloc[0]['Lname'])
else:
print("Couldn't find coach ", thisID)
thisteam=thisteam.dropna(subset=['AssistantIDs'])
if len(thisteam)==1: # grab asst IDs if they exist
asstIDs=thisteam.iloc[0]['AssistantIDs']
asstIDs=[str(s).strip() for s in asstIDs.split(",")]
for i, asstID in enumerate(asstIDs):
if asstID in cards:
IDlist.append(cards.get(asstID,'')) # found assistant coaches ID card image
else: # can't find ... get assistant first last
thisasst=coaches[coaches['Coach ID']==asstID] # matching asst coach row
if len(thisasst)==1:
IDlist.append(thisasst.iloc[0]['Fname']+' '+thisasst.iloc[0]['Lname'])
else:
print("Couldn't find coach ", asstID)
return IDlist
def autocsvbackup(df, filename, newback=True):
    ''' Pass df (e.g. players) for backup and a basename (e.g. "family_contact") for the file; finds the list of existing backups and keeps ones of
    certain ages based on targetdates list;
    can't remember why newback=False was needed (always True here to make a new backup)
    '''
# TODO fix this!
pass
return
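# A minimal sketch of what autocsvbackup could do, assuming date-stamped copies in a backup
# folder (illustrative only; the targetdates-based pruning from the docstring is omitted):
#   backname = filename.split('.')[0] + datetime.now().strftime('_%d%b%y') + '.csv'
#   df.to_csv(cnf._OUTPUT_DIR + '\\backups\\' + backname, index=False)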
def parseDate(val):
'''
Conversion of date string to datetime.date (always header line 2 40:60)
Possible date formats: 20180316 (newer style) or 03/15/2018 (older style)
For NGA files Date format changed from 03/15/2018 to 20180316 (on jday 75 in 2018)
time format: 221100 or 22:11:00 (sometimes w/ UTC)
not terribly concerned w/ time
    possible date formats: 0) 03/01/2018, 3/1/2018, 3/1/18, 03/01/18 or 1/1/19
        1) 2019-1-1 or 2019-01-01  2) 01-01-2019
'''
if not isinstance(val, str):
return val
else:
if ' ' in val: # Remove time substring (but will fail for 3 Oct 2019)
val=val.split(' ')[0] # strip time substring if present
    patterns=[r'\d{1,2}/\d{1,2}/\d{2,4}', r'\d{4}-\d{1,2}-\d{1,2}', r'\d{1,2}-\d{1,2}-\d{4}']
for i, patt in enumerate(patterns):
match=re.search(r'%s' %patt, val)
if match:
if i==0: # Extract 03/16/2018 (or rarely 28/10/2019 style)
try:
(mo,dy,yr)=[int(i) for i in val.split('/')]
if yr<100 and len(str(yr))==2: # handle 2 digit year
yr=int('20'+str(yr))
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return None
if i==1: # extract 2017-01-01 style (year first)
try:
(yr,mo,dy)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
if i==2: # extract 01-01-2019 style (year last)
try:
(mo,dy,yr)=[int(i) for i in val.split('-')]
if mo < 13: # normal US version (month first)
return datetime(yr, mo, dy).date()
# handle day month reversal
elif dy<13: # possible month day reverse
print('Month and day reverse for %s' %val)
return datetime(yr, dy, mo).date() # Assume month/day switch
except:
print('Problem extracting date from ', val)
return val
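# Hedged usage sketch for parseDate (illustrative inputs, not from the source data);
# assumes it is applied to raw CSV strings before any datetime conversion:
#   parseDate('03/16/2018')  -> datetime.date(2018, 3, 16)
#   parseDate('3/1/18')      -> datetime.date(2018, 3, 1)   (2-digit year expanded to 2018)
#   parseDate('2019-01-01')  -> datetime.date(2019, 1, 1)
#   parseDate('01-01-2019')  -> datetime.date(2019, 1, 1)
#   parseDate('20180316')    -> None (mentioned in the docstring, but no pattern matches it)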
def loadProcessPlayerInfo():
    '''Loads and processes players & family contacts (but not the signup file);
    takes no arguments: reads players.csv and family_contact.csv from cnf._INPUT_DIR
    '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
famcontact=formatnamesnumbers(famcontact)
return players, famcontact
def loadProcessGfiles(gsignups, season, year):
'''Loads and processes players, family contacts and signup file, gets active
season and year
args:
gsignups -- google signups
        season - 'Fall', 'Winter' or 'Spring'
        year - 4 digit int (uses the fall value for the whole school year,
            i.e. the 2018-19 year is always 2018)
'''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
if season=='Winter':
gsignups['Sport']='Basketball'
# TODO determine where multiple sports converted to separate lines
duplicated=gsignups[gsignups.duplicated(subset=['First', 'Last','Grade','Sport'])]
if len(duplicated)>0:
print('Remove duplicate signups for %s' %", ".join(duplicated.Last.unique().tolist()))
gsignups=gsignups.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
gsignups.loc[:,'Sport']=gsignups['Sport'].str.replace('Volleyball','VB')
#gsignups.loc[:,'Sport']=gsignups.loc[:,'Sport'].str.replace('Volleyball','VB').copy()
#gsignups.loc[:,'Sport']=gsignups['Sport'].replace({'Volleyball':'VB'}, regex=True).copy()
missing=[i for i in ['Famkey','Plakey'] if i not in gsignups.columns]
for col in missing: # add blank vals
gsignups.loc[gsignups.index, col]=np.nan
# convert assorted DOB strings to datetime.date
if not isinstance(gsignups.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
gsignups.loc[:,'DOB']=gsignups.DOB.apply(lambda x: parseDate(x))
# Get year from signup file name
outputduplicates(gsignups) # quick check of duplicates output in console window (already removed from signups)
gsignups=formatnamesnumbers(gsignups) # format phone numbers, names to title case, standardize schools, etc.
famcontact=formatnamesnumbers(famcontact)
def processGkey(val):
''' Some plakey/famkey copied to drive... must convert nan(float), whitespace or
number as string to either nan or int
'''
if isinstance(val, str):
val=''.join(val.split(' '))
if val=='':
return np.nan
else:
try:
return int(val)
except:
return np.nan
else:
return np.nan
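    # Hedged examples of processGkey behaviour (illustrative only):
    #   ' 123 ' -> 123, '12 3' -> 123, '' -> nan, 'abc' -> nan, np.nan -> nan
    # note: any non-string value (including a numeric 123.0) also maps to nan here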
# ensure gsignups has only int or nan (no whitespace)
gsignups.loc[:,'Plakey']=gsignups['Plakey'].apply(lambda x: processGkey(x))
gsignups.loc[:,'Famkey']=gsignups['Famkey'].apply(lambda x: processGkey(x))
return players, famcontact, gsignups
def loadprocessfiles(signupfile):
'''Loads and processes players, family contacts and signup file, gets active
season and year '''
players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
players.loc[:,'Grade']=players.Grade.replace('K',0)
players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
players.loc[:,'Grade']=players.Grade.astype(int)
# TODO use fixdates if players imports as string (various formats possible)
# players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
    if not isinstance(players.DOB[0], pd.Timestamp): # sometimes direct import to pd timestamp works, other times not
        try:
            players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x)) # convert each DOB string to datetime.date
except:
print('Failure converting player DOB to datetime/timestamp')
famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
# read this season's sports signup file and rename columns
if signupfile.endswith('.csv'):
SUraw=pd.read_csv(signupfile)
elif 'xls' in signupfile:
try:
SUraw=pd.read_excel(signupfile, sheetname='Raw') # may or may not have plakey/famkey
except:
SUraw=
|
pd.read_excel(signupfile)
|
pandas.read_excel
|
#!/usr/bin/env python
# encoding:utf-8
"""
Author : <NAME>
Date : 2021/8/4
Time: 20:06
File: precision_table_plot.py
HomePage : http://github.com/yuanqingmei
Email : <EMAIL>
Compute the avg, std, max, min values and draw the box plot of precision and recall.
"""
import time
def precision_table_plot(working_dir="F:\\NJU\\MTmeta\\experiments\\pooled\\",
plot_dir="F:\\NJU\\MTmeta\\experiments\\pooled\\plots\\"):
import os
import csv
import pandas as pd
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 5000)
    plt.rcParams['savefig.dpi'] = 900  # output image resolution (dpi)
plt.rcParams['figure.figsize'] = (8.0, 4.0)
os.chdir(working_dir)
df =
|
pd.read_csv(working_dir + "AUCs.csv", keep_default_na=False, na_values=[""])
|
pandas.read_csv
|
# pep8: disable=E501
from packaging.version import Version
import h5py
import os
import pytest
import shutil
import importlib
import random
import json
import tensorflow as tf
from tensorflow.keras.models import Sequential as TfSequential
from tensorflow.keras.layers import Dense as TfDense
from tensorflow.keras.optimizers import SGD as TfSGD
import keras
from keras.models import Sequential
from keras.layers import Layer, Dense
from keras import backend as K
from keras.optimizers import SGD
import sklearn.datasets as datasets
import pandas as pd
import numpy as np
import yaml
from unittest import mock
import mlflow
import mlflow.keras
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.exceptions import MlflowException
from mlflow.models import Model, infer_signature
from mlflow.models.utils import _read_example
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from tests.helper_functions import pyfunc_serve_and_score_model
from tests.helper_functions import (
_compare_conda_env_requirements,
_assert_pip_requirements,
_is_available_on_pypi,
_is_importable,
)
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.helper_functions import mock_s3_bucket # pylint: disable=unused-import
from tests.pyfunc.test_spark import score_model_as_udf
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
EXTRA_PYFUNC_SERVING_TEST_ARGS = [] if _is_available_on_pypi("keras") else ["--no-conda"]
@pytest.fixture(scope="module", autouse=True)
def fix_random_seed():
SEED = 0
os.environ["PYTHONHASHSEED"] = str(SEED)
random.seed(SEED)
np.random.seed(SEED)
if Version(tf.__version__) >= Version("2.0.0"):
tf.random.set_seed(SEED)
else:
tf.set_random_seed(SEED)
@pytest.fixture(scope="module")
def data():
iris = datasets.load_iris()
data = pd.DataFrame(
data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"]
)
y = data["target"]
x = data.drop("target", axis=1)
return x, y
@pytest.fixture(scope="module")
def model(data):
x, y = data
model = Sequential()
model.add(Dense(3, input_dim=4))
model.add(Dense(1))
# Use a small learning rate to prevent exploding gradients which may produce
# infinite prediction values
lr = 0.001
kwargs = (
# `lr` was renamed to `learning_rate` in keras 2.3.0:
# https://github.com/keras-team/keras/releases/tag/2.3.0
{"lr": lr}
if Version(keras.__version__) < Version("2.3.0")
else {"learning_rate": lr}
)
model.compile(loss="mean_squared_error", optimizer=SGD(**kwargs))
model.fit(x.values, y.values)
return model
@pytest.fixture(scope="module")
def tf_keras_model(data):
x, y = data
model = TfSequential()
model.add(TfDense(3, input_dim=4))
model.add(TfDense(1))
model.compile(loss="mean_squared_error", optimizer=TfSGD(learning_rate=0.001))
model.fit(x.values, y.values)
return model
@pytest.fixture(scope="module")
def predicted(model, data):
return model.predict(data[0].values)
@pytest.fixture(scope="module")
def custom_layer():
class MyDense(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super().__init__(**kwargs)
def build(self, input_shape):
# pylint: disable=attribute-defined-outside-init
self.kernel = self.add_weight(
name="kernel",
shape=(input_shape[1], self.output_dim),
initializer="uniform",
trainable=True,
)
super().build(input_shape)
def call(self, x):
# pylint: disable=arguments-differ
return K.dot(x, self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
def get_config(self):
return {"output_dim": self.output_dim}
return MyDense
@pytest.fixture(scope="module")
def custom_model(data, custom_layer):
x, y = data
model = Sequential()
model.add(Dense(6, input_dim=4))
model.add(custom_layer(1))
model.compile(loss="mean_squared_error", optimizer="SGD")
model.fit(x.values, y.values, epochs=1)
return model
@pytest.fixture(scope="module")
def custom_predicted(custom_model, data):
return custom_model.predict(data[0].values)
@pytest.fixture
def model_path(tmpdir):
return os.path.join(tmpdir.strpath, "model")
@pytest.fixture
def keras_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["keras", "tensorflow", "pytest"])
return conda_env
@pytest.mark.disable_prevent_infer_pip_requirements_fallback
def test_that_keras_module_arg_works(model_path):
class MyModel(object):
def __init__(self, x):
self._x = x
def __eq__(self, other):
return self._x == other._x
def save(self, path, **kwargs):
# pylint: disable=unused-argument
with h5py.File(path, "w") as f:
f.create_dataset(name="x", data=self._x)
class FakeKerasModule(object):
__name__ = "some.test.keras.module"
__version__ = "42.42.42"
@staticmethod
def load_model(file, **kwargs):
# pylint: disable=unused-argument
# `Dataset.value` was removed in `h5py == 3.0.0`
if Version(h5py.__version__) >= Version("3.0.0"):
return MyModel(file.get("x")[()].decode("utf-8"))
else:
return MyModel(file.get("x").value)
original_import = importlib.import_module
def _import_module(name, **kwargs):
if name.startswith(FakeKerasModule.__name__):
return FakeKerasModule
else:
return original_import(name, **kwargs)
with mock.patch("importlib.import_module") as import_module_mock:
import_module_mock.side_effect = _import_module
x = MyModel("x123")
path0 = os.path.join(model_path, "0")
with pytest.raises(MlflowException):
mlflow.keras.save_model(x, path0)
mlflow.keras.save_model(x, path0, keras_module=FakeKerasModule, save_format="h5")
y = mlflow.keras.load_model(path0)
assert x == y
path1 = os.path.join(model_path, "1")
mlflow.keras.save_model(x, path1, keras_module=FakeKerasModule.__name__, save_format="h5")
z = mlflow.keras.load_model(path1)
assert x == z
# Tests model log
with mlflow.start_run() as active_run:
with pytest.raises(MlflowException):
mlflow.keras.log_model(x, "model0")
mlflow.keras.log_model(x, "model0", keras_module=FakeKerasModule, save_format="h5")
a = mlflow.keras.load_model("runs:/{}/model0".format(active_run.info.run_id))
assert x == a
mlflow.keras.log_model(
x, "model1", keras_module=FakeKerasModule.__name__, save_format="h5"
)
b = mlflow.keras.load_model("runs:/{}/model1".format(active_run.info.run_id))
assert x == b
@pytest.mark.parametrize(
"build_model,save_format",
[(model, None), (tf_keras_model, None), (tf_keras_model, "h5"), (tf_keras_model, "tf")],
)
@pytest.mark.large
def test_model_save_load(build_model, save_format, model_path, data):
x, _ = data
keras_model = build_model(data)
if build_model == tf_keras_model:
model_path = os.path.join(model_path, "tf")
else:
model_path = os.path.join(model_path, "plain")
expected = keras_model.predict(x.values)
kwargs = {"save_format": save_format} if save_format else {}
mlflow.keras.save_model(keras_model, model_path, **kwargs)
# Loading Keras model
model_loaded = mlflow.keras.load_model(model_path)
# When saving as SavedModel, we actually convert the model
# to a slightly different format, so we cannot assume it is
# exactly the same.
if save_format != "tf":
assert type(keras_model) == type(model_loaded)
np.testing.assert_allclose(model_loaded.predict(x.values), expected, rtol=1e-5)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
np.testing.assert_allclose(pyfunc_loaded.predict(x).values, expected, rtol=1e-5)
# pyfunc serve
scoring_response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pd.DataFrame(x),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
print(scoring_response.content)
actual_scoring_response = pd.read_json(
scoring_response.content, orient="records", encoding="utf8"
).values.astype(np.float32)
np.testing.assert_allclose(actual_scoring_response, expected, rtol=1e-5)
# test spark udf
spark_udf_preds = score_model_as_udf(
model_uri=os.path.abspath(model_path), pandas_df=pd.DataFrame(x), result_type="float"
)
np.allclose(np.array(spark_udf_preds), expected.reshape(len(spark_udf_preds)))
@pytest.mark.large
def test_signature_and_examples_are_saved_correctly(model, data):
signature_ = infer_signature(*data)
example_ = data[0].head(3)
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.keras.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert all((_read_example(mlflow_model, path) == example).all())
@pytest.mark.large
def test_custom_model_save_load(custom_model, custom_layer, data, custom_predicted, model_path):
x, _ = data
custom_objects = {"MyDense": custom_layer}
mlflow.keras.save_model(custom_model, model_path, custom_objects=custom_objects)
# Loading Keras model
model_loaded = mlflow.keras.load_model(model_path)
assert all(model_loaded.predict(x.values) == custom_predicted)
# pyfunc serve
scoring_response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pd.DataFrame(x),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
assert np.allclose(
|
pd.read_json(scoring_response.content, orient="records", encoding="utf8")
|
pandas.read_json
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Step 3X Preprocessing: Feature Selection
License_info: ISC
ISC License
Copyright (c) 2020, <NAME>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# Futures
#from __future__ import print_function
# Built-in/Generic Imports
import os
# Libs
import argparse
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib as m
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LassoCV
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegressionCV
from sklearn.feature_selection import RFE
# Own modules
import utils.data_visualization_functions as vis
import utils.data_handling_support_functions as sup
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Christian Doppler Laboratory for ' \
'Embedded Machine Learning'
__credits__ = ['']
__license__ = 'ISC'
__version__ = '0.2.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Experimental'
register_matplotlib_converters()
#Global settings
np.set_printoptions(precision=3)
# Suppress printout in scientific notation
np.set_printoptions(suppress=True)
parser = argparse.ArgumentParser(description='Step 3 - Perform feature selection')
parser.add_argument("-conf", '--config_path', default="config/debug_timedata_omxS30.ini",
help='Configuration file path', required=False)
args = parser.parse_args()
def predict_features_simple(X, y):
'''
'''
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, y)
return clf.score(X, y)
def execute_lasso_feature_selection(X_scaled, y, conf, image_save_directory):
'''
'''
print("Feature selection with lasso regression")
reg = LassoCV(cv=10, max_iter=100000)
reg.fit(X_scaled, y)
coef = pd.Series(reg.coef_, index=X_scaled.columns)
print("Best alpha using built-in LassoCV: %f" % reg.alpha_)
print("Best score using built-in LassoCV: %f" % reg.score(X_scaled, y))
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
imp_coef = coef.sort_values()
coefList = list(imp_coef[imp_coef != 0].index)
print("Lasso coefficient list\n:", coefList)
# plt.figure()
m.rcParams['figure.figsize'] = (8.0, 20.0)
imp_coef.plot(kind="barh")
plt.title("Feature importance using Lasso Model")
plt.tight_layout()
vis.save_figure(plt.gcf(), image_save_directory=image_save_directory, filename="Lasso_Model_Weights")
#if image_save_directory:
# if not os.path.isdir(image_save_directory):
# os.makedirs(image_save_directory)
# plt.savefig(os.path.join(image_save_directory, conf['Common'].get('dataset_name') + '_Lasso_Model_Weights'), dpi=300)
#plt.show(block = False)
return coefList
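# Minimal illustrative sketch of the LassoCV-based selection idea above, run on
# synthetic data and without the plotting/saving side effects; the _demo_ name and
# data are not part of the original pipeline.
def _demo_lasso_selection_sketch():
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.randn(200, 5), columns=['f0', 'f1', 'f2', 'f3', 'f4'])
    y = 3 * X['f0'] - 2 * X['f3'] + 0.1 * rng.randn(200)  # only f0 and f3 carry signal
    reg = LassoCV(cv=5, max_iter=10000).fit(X, y)
    coef = pd.Series(reg.coef_, index=X.columns)
    # keep only features with a non-zero lasso coefficient, as done above
    return list(coef[coef != 0].index)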
def execute_treebased_feature_selection(X_scaled, y, conf, image_save_directory):
'''
'''
print("Tree based feature selection")
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(X_scaled, y)
print(clf.feature_importances_)
print("Best score: %f" % clf.score(X_scaled, y))
model = SelectFromModel(clf, prefit=True)
X_new = model.transform(X_scaled)
    print("Reduced feature matrix shape after SelectFromModel:", X_new.shape)
threshold = 0.010
tree_coef = pd.Series(clf.feature_importances_, index=X_scaled.columns)
print("Tree search picked " + str(sum(tree_coef >= threshold)) + " variables and eliminated the other " + str(
sum(tree_coef < threshold)) + " variables")
imp_treecoef = tree_coef.sort_values()
treecoefList = list(imp_treecoef[imp_treecoef > threshold].index)
print("Tree based coefficent list:\n", treecoefList)
plt.figure()
m.rcParams['figure.figsize'] = (8.0, 20.0)
imp_treecoef.plot(kind="barh")
plt.title("Feature importance using Tree Search Model")
plt.vlines(threshold, 0, len(X_scaled.columns), color='red')
plt.tight_layout()
vis.save_figure(plt.gcf(), image_save_directory=image_save_directory, filename="Tree_Based_Importance")
#if image_save_directory:
# if not os.path.isdir(image_save_directory):
# os.makedirs(image_save_directory)
# plt.savefig(os.path.join(image_save_directory, conf['Common'].get('dataset_name') + '_Tree_Based_Importance'), dpi=300)
#plt.show(block = False)
return treecoefList
def execute_backwardelimination_feature_selection(X_scaled, y):
'''
'''
print("Backward elimination")
cols = list(X_scaled.columns)
pmax = 1
while (len(cols) > 0):
p = []
X_1 = X_scaled[cols]
X_1 = sm.add_constant(X_1)
model = sm.OLS(y, X_1).fit()
p = pd.Series(model.pvalues.values[1:], index=cols)
pmax = max(p)
feature_with_p_max = p.idxmax()
if (pmax > 0.05):
cols.remove(feature_with_p_max)
else:
break
selected_features_BE = cols
print("Selected features:")
print(selected_features_BE)
print("\nNumber of features={}. Original number of features={}\n".format(len(selected_features_BE),
len(X_scaled.columns)))
[print("column {} removed".format(x)) for x in X_scaled.columns if x not in selected_features_BE]
print("Finished")
return selected_features_BE
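# Minimal illustrative sketch of the p-value driven backward elimination above, run on
# synthetic data; the _demo_ name and data are not part of the original pipeline.
def _demo_backward_elimination_sketch(p_threshold=0.05):
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.randn(200, 4), columns=['x0', 'x1', 'x2', 'x3'])
    y = 2 * X['x0'] + X['x1'] + 0.1 * rng.randn(200)  # x2 and x3 are pure noise
    cols = list(X.columns)
    while cols:
        model = sm.OLS(y, sm.add_constant(X[cols])).fit()
        pvals = pd.Series(model.pvalues.values[1:], index=cols)  # skip the constant term
        if pvals.max() > p_threshold:
            cols.remove(pvals.idxmax())  # drop the least significant feature and refit
        else:
            break
    return cols  # expected to retain roughly ['x0', 'x1']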
def execute_recursive_elimination_feature_selection(X_scaled, y):
'''
'''
print("Recursive elimination")
model = LogisticRegressionCV(solver='liblinear', cv=3)
print("Start Recursive Elimination. Fit model with {} examples.".format(X_scaled.shape[0]))
    # Initializing RFE model (no n_features_to_select given, so sklearn defaults to half the features)
rfe = RFE(model)
# Transforming data using RFE
X_rfe = rfe.fit_transform(X_scaled, y)
# Fitting the data to model
model.fit(X_rfe, y)
print("Best accuracy score using built-in Logistic Regression: ", model.score(X_rfe, y))
print("Ranking")
rfe_coef =
|
pd.Series(X_scaled.columns, index=rfe.ranking_ - 1)
|
pandas.Series
|
from __future__ import division
"""Functions to help detect face, landmarks, emotions, action units from images and videos"""
from collections import deque
from multiprocessing.pool import ThreadPool
import os
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw, ImageOps
import math
from scipy.spatial import ConvexHull
from skimage.morphology.convex_hull import grid_points_in_poly
from skimage.feature import hog
import cv2
import feat
from tqdm import tqdm
from feat.data import Fex
from feat.utils import (
get_resource_path,
face_rect_to_coords,
openface_2d_landmark_columns,
jaanet_AU_presence,
RF_AU_presence,
FEAT_EMOTION_MAPPER,
FEAT_EMOTION_COLUMNS,
FEAT_FACEBOX_COLUMNS,
FEAT_TIME_COLUMNS,
FACET_TIME_COLUMNS,
BBox,
convert68to49,
padding,
resize_with_padding,
align_face_68pts
)
from feat.au_detectors.JAANet.JAA_test import JAANet
from feat.au_detectors.DRML.DRML_test import DRMLNet
from feat.au_detectors.StatLearning.SL_test import RandomForestClassifier, SVMClassifier, LogisticClassifier
from feat.emo_detectors.ferNet.ferNet_test import ferNetModule
from feat.emo_detectors.ResMaskNet.resmasknet_test import ResMaskNet
from feat.emo_detectors.StatLearning.EmoSL_test import EmoRandomForestClassifier, EmoSVMClassifier
import torch
from feat.face_detectors.FaceBoxes.FaceBoxes_test import FaceBoxes
from feat.face_detectors.MTCNN.MTCNN_test import MTCNN
from feat.face_detectors.Retinaface import Retinaface_test
from feat.landmark_detectors.basenet_test import MobileNet_GDConv
from feat.landmark_detectors.pfld_compressed_test import PFLDInference
from feat.landmark_detectors.mobilefacenet_test import MobileFaceNet
import json
from torchvision.datasets.utils import download_url
import zipfile
class Detector(object):
def __init__(
self,
face_model="retinaface",
landmark_model="mobilenet",
au_model="rf",
emotion_model="resmasknet",
n_jobs=1,
):
"""Detector class to detect FEX from images or videos.
Detector is a class used to detect faces, facial landmarks, emotions, and action units from images and videos.
Args:
n_jobs (int, default=1): Number of processes to use for extraction.
Attributes:
info (dict):
n_jobs (int): Number of jobs to be used in parallel.
face_model (str, default=retinaface): Name of face detection model
            landmark_model (str, default=mobilenet): Name of landmark model
au_model (str, default=rf): Name of Action Unit detection model
emotion_model (str, default=resmasknet): Path to emotion detection model.
            face_detection_columns (list): Column names for face detection output (x, y, w, h)
face_landmark_columns (list): Column names for face landmark output (x0, y0, x1, y1, ...)
emotion_model_columns (list): Column names for emotion model output
mapper (dict): Class names for emotion model output by index.
input_shape (dict)
face_detector: face detector object
face_landmark: face_landmark object
emotion_model: emotion_model object
Examples:
>> detector = Detector(n_jobs=1)
>> detector.detect_image("input.jpg")
>> detector.detect_video("input.mp4")
"""
self.info = {}
self.info["n_jobs"] = n_jobs
if torch.cuda.is_available():
self.map_location = lambda storage, loc: storage.cuda()
else:
self.map_location = "cpu"
""" LOAD UP THE MODELS """
print("Loading Face Detection model: ", face_model)
# Check if model files have been downloaded. Otherwise download model.
# get model url.
with open(os.path.join(get_resource_path(), "model_list.json"), "r") as f:
model_urls = json.load(f)
if face_model:
for url in model_urls["face_detectors"][face_model.lower()]["urls"]:
download_url(url, get_resource_path())
if landmark_model:
for url in model_urls["landmark_detectors"][landmark_model.lower()]["urls"]:
download_url(url, get_resource_path())
if au_model:
for url in model_urls["au_detectors"][au_model.lower()]["urls"]:
download_url(url, get_resource_path())
if ".zip" in url:
import zipfile
with zipfile.ZipFile(os.path.join(get_resource_path(), "JAANetparams.zip"), 'r') as zip_ref:
zip_ref.extractall(os.path.join(get_resource_path()))
if au_model.lower() in ['logistic', 'svm', 'rf']:
download_url(
model_urls["au_detectors"]['hog-pca']['urls'][0], get_resource_path())
download_url(
model_urls["au_detectors"]['au_scalar']['urls'][0], get_resource_path())
if emotion_model:
for url in model_urls["emotion_detectors"][emotion_model.lower()]["urls"]:
download_url(url, get_resource_path())
if emotion_model.lower() in ['svm', 'rf']:
download_url(
model_urls["emotion_detectors"]['emo_pca']['urls'][0], get_resource_path())
download_url(
model_urls["emotion_detectors"]['emo_scalar']['urls'][0], get_resource_path())
if face_model:
if face_model.lower() == "faceboxes":
self.face_detector = FaceBoxes()
elif face_model.lower() == "retinaface":
self.face_detector = Retinaface_test.Retinaface()
elif face_model.lower() == "mtcnn":
self.face_detector = MTCNN()
self.info["face_model"] = face_model
facebox_columns = FEAT_FACEBOX_COLUMNS
self.info["face_detection_columns"] = facebox_columns
predictions = np.empty((1, len(facebox_columns)))
predictions[:] = np.nan
empty_facebox = pd.DataFrame(predictions, columns=facebox_columns)
self._empty_facebox = empty_facebox
print("Loading Face Landmark model: ", landmark_model)
if landmark_model:
if landmark_model.lower() == "mobilenet":
self.landmark_detector = MobileNet_GDConv(136)
self.landmark_detector = torch.nn.DataParallel(
self.landmark_detector)
checkpoint = torch.load(
os.path.join(
get_resource_path(),
"mobilenet_224_model_best_gdconv_external.pth.tar",
),
map_location=self.map_location,
)
self.landmark_detector.load_state_dict(
checkpoint["state_dict"])
elif landmark_model.lower() == "pfld":
self.landmark_detector = PFLDInference()
checkpoint = torch.load(
os.path.join(get_resource_path(),
"pfld_model_best.pth.tar"),
map_location=self.map_location,
)
self.landmark_detector.load_state_dict(
checkpoint["state_dict"])
elif landmark_model.lower() == "mobilefacenet":
self.landmark_detector = MobileFaceNet([112, 112], 136)
checkpoint = torch.load(
os.path.join(
get_resource_path(), "mobilefacenet_model_best.pth.tar"
),
map_location=self.map_location,
)
self.landmark_detector.load_state_dict(
checkpoint["state_dict"])
self.info["landmark_model"] = landmark_model
self.info["mapper"] = openface_2d_landmark_columns
landmark_columns = openface_2d_landmark_columns
self.info["face_landmark_columns"] = landmark_columns
predictions = np.empty((1, len(openface_2d_landmark_columns)))
predictions[:] = np.nan
empty_landmarks = pd.DataFrame(predictions, columns=landmark_columns)
self._empty_landmark = empty_landmarks
print("Loading au model: ", au_model)
self.info["au_model"] = au_model
if au_model:
if au_model.lower() == "jaanet":
self.au_model = JAANet()
elif au_model.lower() == "drml":
self.au_model = DRMLNet()
elif au_model.lower() == "logistic":
self.au_model = LogisticClassifier()
elif au_model.lower() == "svm":
self.au_model = SVMClassifier()
elif au_model.lower() == 'rf':
self.au_model = RandomForestClassifier()
if (au_model is None) or (au_model.lower() in ['jaanet', 'drml']):
auoccur_columns = jaanet_AU_presence
else:
auoccur_columns = RF_AU_presence
self.info["au_presence_columns"] = auoccur_columns
predictions = np.empty((1, len(auoccur_columns)))
predictions[:] = np.nan
empty_au_occurs = pd.DataFrame(predictions, columns=auoccur_columns)
self._empty_auoccurence = empty_au_occurs
print("Loading emotion model: ", emotion_model)
self.info["emotion_model"] = emotion_model
if emotion_model:
if emotion_model.lower() == "fer":
self.emotion_model = ferNetModule()
elif emotion_model.lower() == "resmasknet":
self.emotion_model = ResMaskNet()
elif emotion_model.lower() == 'svm':
self.emotion_model = EmoSVMClassifier()
elif emotion_model.lower() == 'rf':
self.emotion_model = EmoRandomForestClassifier()
self.info["emotion_model_columns"] = FEAT_EMOTION_COLUMNS
predictions = np.empty((1, len(FEAT_EMOTION_COLUMNS)))
predictions[:] = np.nan
empty_emotion = pd.DataFrame(predictions, columns=FEAT_EMOTION_COLUMNS)
self._empty_emotion = empty_emotion
predictions = np.empty((1, len(auoccur_columns)))
predictions[:] = np.nan
empty_au_occurs =
|
pd.DataFrame(predictions, columns=auoccur_columns)
|
pandas.DataFrame
|
import pandas as pd
import random
import pickle
from tqdm import tqdm
import seaborn as sns
from sklearn.metrics import *
from matplotlib import pyplot as plt
from preferences import notas_pref
from ahp import ahp
from data_preparation import create_subsample
from fine_tunning import fine_tunning
from data_preparation import merge_matrices
from tau_distance import normalised_kendall_tau_distance
len_Q = 5 # n_samples to be evaluated
CV = 5 # number of cross-validation
test_size = 0.2 # 80% train and 20% test
accepted_error = .05 # max tau distance accepted between current ranking and the predicted one
df_var = pd.read_csv("dec_5obj_p2.csv", header=None) # decision variables
# df_var = df_var.iloc[0:55, :].round(5)
df_obj = pd.read_csv('obj_5obj_p2.csv', header=None) # values in Pareto front
# df_obj = df_obj.iloc[0:55, :].round(5)
npop, nvar = df_var.shape
nobj = df_obj.shape[1]
# Generate the preferences
df_obj = df_obj.to_numpy()
df_pref = notas_pref(df_obj)
# AHP from the original alternatives
rank_ahp = ahp(df_pref).index
# Generate the index to be evaluated
index = list(df_var.index)
# Aleatory ranking
aleatory = index.copy()
random.shuffle(aleatory)
# Start an aleatory ranking
rank_aleatory = aleatory.copy()
# Distances
current_previous = []
current_ahp = []
# Metrics
mse = []
rmse = []
r2 = []
mape = []
# Iterations
iteration = []
cont = 0
temp = 1
for aux in tqdm(range(len_Q, npop, len_Q)):
cont += 1
# Define Q and N-Q indexes
Q_index = aleatory[0:aux]
N_Q_index = [x for x in index if x not in Q_index]
# Train
df_Q = create_subsample(df_var=df_var, df_pref=df_pref, nobj=nobj, index=Q_index)
X_train = df_Q.iloc[:, :-nobj] # to predict
y_train = df_Q.iloc[:, -nobj:] # real targets
# Test
df_N_Q = create_subsample(df_var=df_var, df_pref=df_pref, nobj=nobj, index=N_Q_index)
X_test = df_N_Q.iloc[:, :-nobj] # to predict
y_test = df_N_Q.iloc[:, -nobj:] # real targets
# Model training
if temp > accepted_error:
tuned_model = fine_tunning(CV, X_train, y_train)
with open("tuned_model_cbic_5obj.pkl", 'wb') as arq: # Save best model
pickle.dump(tuned_model, arq)
tuned_model.fit(X_train, y_train)
else:
with open("tuned_model_cbic_5obj.pkl", "rb") as fp: # Load trained model
tuned_model = pickle.load(fp)
# Model evaluation
y_pred = tuned_model.predict(X_test)
y_pred = pd.DataFrame(y_pred)
# Metrics
mse.append(mean_squared_error(pd.DataFrame(y_test.values), pd.DataFrame(y_pred.values), squared=True))
rmse.append(mean_squared_error(
|
pd.DataFrame(y_test.values)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 14:24:17 2020
@author: Lab408
"""
#########################KFOLD_Lightgbm#######################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
import datetime
import gc
DATA_PATH ='/Users/Lab408/Desktop/try_model_ashrae_energy_prediction_kaggle/'
##Load data
train_df = pd.read_csv(DATA_PATH + 'small_data_train_energy.csv')
# Remove outliers
train_df = train_df [ train_df['building_id'] != 1099 ]
train_df = train_df.query('not (building_id <= 104 & meter == 0 & timestamp <= "2016-05-20")')
building_df = pd.read_csv(DATA_PATH + 'building_metadata_forsmalldata.csv')
weather_df =
|
pd.read_csv(DATA_PATH + 'weather_train_smalldata.csv')
|
pandas.read_csv
|
# pylint: disable=E1101
from datetime import datetime
import datetime as dt
import os
import warnings
import nose
import struct
import sys
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.core.common import is_categorical_dtype
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
import pandas.util.testing as tm
from pandas.tslib import NaT
from pandas import compat
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path,write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
reader_114 = StataReader(self.dta1_114)
with warnings.catch_warnings(record=True) as w:
parsed_114_data = reader_114.data()
reader_114 = StataReader(self.dta1_114)
parsed_114_read = reader_114.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
reader_114 = StataReader(self.dta1_114)
parsed_114 = reader_114.read()
reader_117 = StataReader(self.dta1_117)
parsed_117 = reader_117.read()
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity as really the nan should be float64, but
# the casting doesn't fail so need to match stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
tm.assert_equal(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category') for col in expected], axis=1)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer', 'floating',
'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5,4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
df.to_stata('test.dta', write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode)
with tm.ensure_clean() as path:
encoded.to_stata(path,encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number', 'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number', 'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
tm.assert_equal(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
tm.assert_equal(len(w), 1) # should get a warning for that format.
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2**9, dtype=np.int16)
s2 = Series(2**17, dtype=np.int32)
s3 = Series(2**33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col].convert_objects(convert_numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(expected['date_td'], coerce=True)
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp, data_label=data_label)
reader =
|
StataReader(path)
|
pandas.io.stata.StataReader
|
# General Packages
from math import atan2, degrees
from datetime import datetime
from pathlib import Path
import time
import pprint
import numpy as np
import pandas as pd
import pickle
# Plotting
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.dates import date2num
import seaborn as sns
# Scaling
from sklearn.preprocessing import StandardScaler
settings = {
#
# audit settings
'data_name': 'credit',
'method_name': 'logreg',
'normalize_data': True,
'force_rational_actions': False,
#
# script flags
'audit_recourse': True,
'plot_audits': True,
'print_flag': True,
'save_flag': True,
'randomseed': 2338,
#
# placeholders
'method_suffixes': [''],
'audit_suffixes': [''],
}
# Paths
repo_dir = Path(__file__).absolute().parent.parent
paper_dir = repo_dir / 'paper/' # directory containing paper related info
data_dir = paper_dir / 'data/' # directory containing data files
results_dir = paper_dir / 'results/' # directory containing results
# create directories that don't exist
for d in [data_dir, results_dir]:
d.mkdir(exist_ok = True)
# Formatting Options
np.set_printoptions(precision = 4, suppress = False)
pd.set_option('display.max_columns', 30)
pd.options.mode.chained_assignment = None
pp = pprint.PrettyPrinter(indent = 4)
# Plotting Settings
sns.set(style="white", palette="muted", color_codes = True)
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 24
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rc('legend', fontsize = 20)
# file names
output_dir = results_dir / settings['data_name']
output_dir.mkdir(exist_ok = True)
if settings['normalize_data']:
settings['method_suffixes'].append('normalized')
if settings['force_rational_actions']:
settings['audit_suffixes'].append('rational')
# set file header
settings['dataset_file'] = '%s/%s_processed.csv' % (data_dir, settings['data_name'])
settings['file_header'] = '%s/%s_%s%s' % (output_dir, settings['data_name'], settings['method_name'], '_'.join(settings['method_suffixes']))
settings['audit_file_header'] = '%s%s' % (settings['file_header'], '_'.join(settings['audit_suffixes']))
settings['model_file'] = '%s_models.pkl' % settings['file_header']
settings['audit_file'] = '%s_audit_results.pkl' % settings['audit_file_header']
# Recourse Objects
from recourse.action_set import ActionSet
from recourse.builder import RecourseBuilder
from recourse.auditor import RecourseAuditor
from recourse.flipset import Flipset
### Helper Functions for Experimental Script
def load_data():
"""Helper function to load in data, and output that and optionally a scaler object:
Output:
data: dict with the following fields
outcome_name: Name of the outcome variable (inferred as the first column.)
variable_names: A list of names indicating input columns.
X: The input features for our model.
y: The column of the dataframe indicating our outcome variable.
scaler: The sklearn StandardScaler used to normalize the dataset, if we wish to scale.
X_scaled: Scaled version of X, if we wish to scale
X_train: The training set: set to the whole dataset if not scaled. Set to X_scaled if we do scale.
scaler:
Object used to scale data. If "scale" is set to None, then this is returned as None.
"""
# data set
data_df =
|
pd.read_csv(settings['dataset_file'])
|
pandas.read_csv
|
from src.sql_table import DummySqlDB
import pandas as pd
import pytest
@pytest.fixture()
def mocking_session():
class SessionMocker():
def __init__(self):
pass
def execute(self, *args):
if args:
return [arg for arg in args]
def commit(self):
print('committed')
def close(self):
print('closed')
yield SessionMocker
class TestDummyDB():
@pytest.fixture()
# This fixture will only be available within the scope of TestGroup
def mock(self, mocker):
mocker.patch('src.sql_table.DummySqlDB._create_engine').return_value = 'test_string'
mocker.patch('src.sql_table.DummySqlDB.query_sql').return_value = pd.DataFrame({'user': ['test']})
def test_transform(self, mock):
cls = DummySqlDB()
actual_features = cls.transform()
expect_frame = pd.DataFrame({'user': ['test'],'test':['test']})
|
pd.testing.assert_frame_equal(actual_features, expect_frame)
|
pandas.testing.assert_frame_equal
|
# An object for importing/exporting .csv files.
import pandas as pd
class CSV:
    # init - no mandatory parameters! since this is used both for import and export, it needs to be flexible
def __init__(self, filepath, filesep = ',', importData=pd.DataFrame([])):
        # since this is intended to be used both for import and export, dat may start empty. Populate it via 'objName.dat=<dataframe>' (if worried about memory usage, pass data in through the 'importData' parameter on creation instead), or via 'getFile()'
        # it expects meaningful, SQL-compliant column names. The automatic cleaning is limited at best, so please clean headers before import.
#init data:
        if isinstance(importData, pd.DataFrame): # can't truth-test a DataFrame with bool(), so check the type instead
self.dat = importData #this will be updated by initial functions
#if no input provided, make it a dataframe
else:
self.dat = pd.DataFrame()
#path to file:
self.fpath = filepath
#deliminator for .csv file:
self.sep = filesep
#needed to see if SQL table exists:
self.File_exists = False
#run startup tasks:
#see if file exists at path:
self.checkForFile()
if self.File_exists:
self.getFile()
##################################################################################
##################################################################################
##################################################################################
#checkForFile(): see if file is there
def checkForFile(self):
import pandas as pd
self.File_exists = False
tst=
|
pd.DataFrame()
|
pandas.DataFrame
|
from functools import reduce
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
import pandas as pd
import os
class Data(ABC):
@abstractmethod
def classification(self):
"""Classification function"""
@abstractmethod
def classification_age_generation(self):
"""Classification according age generation of members in household"""
@abstractmethod
def classification_sex_age(self):
"""Classification according sex household referent person and age of members"""
@abstractmethod
def clean_data(self):
"""Clean data with more than 10% of null values"""
@abstractmethod
def compute_representativity(self):
"""Compute representativity of energy consumption"""
@abstractmethod
def covariance_matrix(self):
"""Compute covariance matrix with respect energy variable"""
@abstractmethod
def drop_nonessential_columns(self):
"""Removes non-essential columns for analysis"""
@abstractmethod
def give_nodes(self):
"""Gives the name of the nodes per type of classification"""
@abstractmethod
def read_data(self):
"""Return ENIGH dataframe"""
@abstractmethod
def read_tables(self):
"""Read tables from ENIGH database"""
@abstractmethod
def proportion_nan(self):
"""Compute proportion of missing values for variables in ENIGH dataset"""
@abstractmethod
def standardization(self):
"""Standarization of dataset using Z-score per node type"""
@dataclass
class ENIGH_Data(Data):
"""Class that contains ENIGH data for year"""
year: int = 2016
clean: bool = True
type_class: str = "SexHHRP_Age"
def classification(self, keep_columns=False) -> pd.DataFrame:
"""Classification function"""
if keep_columns:
if self.type_class == "SexHHRP_Age":
return self.classification_sex_age(self.read_data())
elif self.type_class == "Age_Generation":
return self.classification_age_generation(self.read_data())
else:
if self.type_class == "SexHHRP_Age":
dataset = self.classification_sex_age(self.read_data())
dataset.drop(columns=["sex_hhrp","age"], inplace=True)
return dataset
elif self.type_class == "Age_Generation":
return self.classification_age_generation(self.read_data())
def classification_age_generation(self, dataset: pd.DataFrame) -> pd.DataFrame:
"""Classification according generation of members in household"""
if self.year == 2016:
generation = [dataset.edad<=16,
(dataset.edad>16) & (dataset.edad<=26),
(dataset.edad>26) & (dataset.edad<=36),
(dataset.edad>36) & (dataset.edad<=46),
(dataset.edad>46) & (dataset.edad<=56),
(dataset.edad>56) & (dataset.edad<=66)]
choices = self.give_nodes()[:-1]
dataset["node"] = np.select(generation, choices, default="G_older_50s")
elif self.year == 2018:
generation = [dataset.edad<=18,
(dataset.edad>18) & (dataset.edad<=28),
(dataset.edad>28) & (dataset.edad<=38),
(dataset.edad>38) & (dataset.edad<=48),
(dataset.edad>48) & (dataset.edad<=58),
(dataset.edad>58) & (dataset.edad<=68)]
choices = self.give_nodes()[:-1]
dataset["node"] = np.select(generation, choices, default="G_older_50s")
elif self.year == 2020:
generation = [dataset.edad<=20,
(dataset.edad>20) & (dataset.edad<=30),
(dataset.edad>30) & (dataset.edad<=40),
(dataset.edad>40) & (dataset.edad<=50),
(dataset.edad>50) & (dataset.edad<=60),
(dataset.edad>60) & (dataset.edad<=70)]
choices = self.give_nodes()[:-1]
dataset["node"] = np.select(generation, choices, default="G_older_50s")
return dataset
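# Note: np.select returns, element-wise, the choice for the first matching condition and
# falls back to `default` otherwise. Minimal sketch with made-up bins:
#   np.select([ages <= 16, ages <= 26], ["G1", "G2"], default="G_older")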
def classification_sex_age(self, dataset: pd.DataFrame) -> pd.DataFrame:
"""Classification according sex of household referent person and age"""
#Sex classification
sexHHRP = [(dataset.sexo_jefe==1),
(dataset.sexo_jefe==2)]
choices = ["H","M"]
dataset["sex_hhrp"] = np.select(sexHHRP, choices, default="empty")
#age classification
hh_members = [
(dataset.p12_64>0) & (dataset.p65mas==0) & (dataset.menores==0),
(dataset.p12_64>0) & (dataset.p65mas==0) & (dataset.menores>0),
(dataset.p12_64>0) & (dataset.p65mas>0) & (dataset.menores==0),
(dataset.p12_64==0) & (dataset.p65mas>0) & (dataset.menores>0),
(dataset.p12_64==0) & (dataset.p65mas>0) & (dataset.menores==0),
(dataset.p12_64>0) & (dataset.p65mas>0) & (dataset.menores>0)]
choices = ["1","2","3","4","5","6"]
dataset["age"] = np.select(hh_members, choices, default="empty")
dataset["node"] = dataset.sex_hhrp + dataset.age
return dataset
def clean_data(self, dataset: pd.DataFrame) -> pd.DataFrame:
"""Clean data with more than 10% if null values"""
column_missing = list()
for column in dataset.columns:
proportion = np.mean(dataset[column].isnull())
if (proportion>=0.1):
column_missing = np.append(column_missing, column)
dataset.drop(columns=list(column_missing),inplace=True)
dataset = dataset.dropna()
return dataset
def compute_representativity(self, covariance_matrix):
"""Compute representativity of energy consumption"""
representativity = dict()
for node in covariance_matrix.columns.unique():
proportion = (covariance_matrix[node]/sum(covariance_matrix[node])).sort_values(
ascending=False).cumsum()
representativity[node] = proportion
representativity[node] = representativity[node].to_frame()
representativity[node]["id"] = range(1,len(representativity[node])+1)
representativity[node]["covariance"] = covariance_matrix[node]
return representativity
def covariance_matrix(self) -> pd.DataFrame:
"""Compute covariance matrix with respect energy variable"""
dict_covariance = dict()
list_dataset_nodes = list()
dict_standardize = self.standardization()
for node in dict_standardize.keys():
dict_covariance[node] = abs(dict_standardize[node].cov().energia)
dict_covariance[node].drop(["energia","vivienda"], inplace=True)
dict_covariance[node].rename(node, inplace=True)
list_dataset_nodes.append(dict_covariance[node])
covariance_matrix =
|
pd.concat(list_dataset_nodes, axis=1)
|
pandas.concat
|
import numpy as np
import pandas as pd
from datetime import datetime
import pytest
import empyrical
from vectorbt import defaults
from vectorbt.records.drawdowns import Drawdowns
from tests.utils import isclose
day_dt = np.timedelta64(86400000000000)
index = pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
])
ts = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [5, 4, 3, 2, 1],
'c': [1, 2, 3, 2, 1]
}, index=index)
ret = ts.pct_change()
defaults.returns['year_freq'] = '252 days' # same as empyrical
factor_returns = pd.DataFrame({
'a': ret['a'] * np.random.uniform(0.8, 1.2, ret.shape[0]),
'b': ret['b'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 2,
'c': ret['c'] * np.random.uniform(0.8, 1.2, ret.shape[0]) * 3
})
# ############# accessors.py ############# #
class TestAccessors:
def test_freq(self):
assert ret.vbt.returns.freq == day_dt
assert ret['a'].vbt.returns.freq == day_dt
assert ret.vbt.returns(freq='2D').freq == day_dt * 2
assert ret['a'].vbt.returns(freq='2D').freq == day_dt * 2
assert pd.Series([1, 2, 3]).vbt.returns.freq is None
assert pd.Series([1, 2, 3]).vbt.returns(freq='3D').freq == day_dt * 3
assert pd.Series([1, 2, 3]).vbt.returns(freq=np.timedelta64(4, 'D')).freq == day_dt * 4
def test_year_freq(self):
assert ret.vbt.returns.year_freq == pd.to_timedelta(defaults.returns['year_freq'])
assert ret['a'].vbt.returns.year_freq == pd.to_timedelta(defaults.returns['year_freq'])
assert ret['a'].vbt.returns(year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert ret.vbt.returns(year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_ann_factor(self):
assert ret['a'].vbt.returns(year_freq='365 days').ann_factor == 365
assert ret.vbt.returns(year_freq='365 days').ann_factor == 365
with pytest.raises(Exception) as e_info:
assert pd.Series([1, 2, 3]).vbt.returns(freq=None).ann_factor
def test_from_price(self):
pd.testing.assert_series_equal(pd.Series.vbt.returns.from_price(ts['a'])._obj, ts['a'].pct_change())
pd.testing.assert_frame_equal(pd.DataFrame.vbt.returns.from_price(ts)._obj, ts.pct_change())
assert pd.Series.vbt.returns.from_price(ts['a'], year_freq='365 days').year_freq == pd.to_timedelta('365 days')
assert pd.DataFrame.vbt.returns.from_price(ts, year_freq='365 days').year_freq == pd.to_timedelta('365 days')
def test_daily(self):
ret_12h = pd.DataFrame({
'a': [0.1, 0.1, 0.1, 0.1, 0.1],
'b': [-0.1, -0.1, -0.1, -0.1, -0.1],
'c': [0.1, -0.1, 0.1, -0.1, 0.1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1, 0),
datetime(2018, 1, 1, 12),
datetime(2018, 1, 2, 0),
datetime(2018, 1, 2, 12),
datetime(2018, 1, 3, 0)
]))
pd.testing.assert_series_equal(
ret_12h['a'].vbt.returns.daily(),
pd.Series(
np.array([0.21, 0.21, 0.1]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
name=ret_12h['a'].name
)
)
pd.testing.assert_frame_equal(
ret_12h.vbt.returns.daily(),
pd.DataFrame(
np.array([
[0.21, -0.19, -0.01],
[0.21, -0.19, -0.01],
[0.1, -0.1, 0.1]
]),
index=pd.DatetimeIndex([
'2018-01-01',
'2018-01-02',
'2018-01-03'
], dtype='datetime64[ns]', freq='D'),
columns=ret_12h.columns
)
)
def test_annual(self):
pd.testing.assert_series_equal(
ret['a'].vbt.returns.annual(),
pd.Series(
np.array([4.]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
name=ret['a'].name
)
)
pd.testing.assert_frame_equal(
ret.vbt.returns.annual(),
pd.DataFrame(
np.array([[4., -0.8, 0.]]),
index=pd.DatetimeIndex(['2018-01-01'], dtype='datetime64[ns]', freq='252D'),
columns=ret.columns
)
)
def test_cumulative(self):
res_a = empyrical.cum_returns(ret['a']).rename('a')
res_b = empyrical.cum_returns(ret['b']).rename('b')
res_c = empyrical.cum_returns(ret['c']).rename('c')
pd.testing.assert_series_equal(
ret['a'].vbt.returns.cumulative(),
res_a
)
pd.testing.assert_frame_equal(
ret.vbt.returns.cumulative(),
pd.concat([res_a, res_b, res_c], axis=1)
)
def test_total(self):
res_a = empyrical.cum_returns_final(ret['a'])
res_b = empyrical.cum_returns_final(ret['b'])
res_c = empyrical.cum_returns_final(ret['c'])
assert isclose(ret['a'].vbt.returns.total(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.total(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_annualized_return(self):
res_a = empyrical.annual_return(ret['a'])
res_b = empyrical.annual_return(ret['b'])
res_c = empyrical.annual_return(ret['c'])
assert isclose(ret['a'].vbt.returns.annualized_return(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_return(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_alpha",
[1., 2., 3.],
)
def test_annualized_volatility(self, test_alpha):
res_a = empyrical.annual_volatility(ret['a'], alpha=test_alpha)
res_b = empyrical.annual_volatility(ret['b'], alpha=test_alpha)
res_c = empyrical.annual_volatility(ret['c'], alpha=test_alpha)
assert isclose(ret['a'].vbt.returns.annualized_volatility(levy_alpha=test_alpha), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.annualized_volatility(levy_alpha=test_alpha),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
def test_calmar_ratio(self):
res_a = empyrical.calmar_ratio(ret['a'])
res_b = empyrical.calmar_ratio(ret['b'])
res_c = empyrical.calmar_ratio(ret['c'])
assert isclose(ret['a'].vbt.returns.calmar_ratio(), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.calmar_ratio(),
pd.Series([res_a, res_b, res_c], index=ret.columns)
)
@pytest.mark.parametrize(
"test_risk_free,test_required_return",
[(0.01, 0.1), (0.02, 0.2), (0.03, 0.3)],
)
def test_omega_ratio(self, test_risk_free, test_required_return):
res_a = empyrical.omega_ratio(ret['a'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_a):
res_a = np.inf
res_b = empyrical.omega_ratio(ret['b'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_b):
res_b = np.inf
res_c = empyrical.omega_ratio(ret['c'], risk_free=test_risk_free, required_return=test_required_return)
if np.isnan(res_c):
res_c = np.inf
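# NaN results are mapped to inf above, presumably because the vbt accessor yields inf
# where empyrical returns NaN (no returns below required_return), so the two agree.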
assert isclose(ret['a'].vbt.returns.omega_ratio(
risk_free=test_risk_free, required_return=test_required_return), res_a)
pd.testing.assert_series_equal(
ret.vbt.returns.omega_ratio(risk_free=test_risk_free, required_return=test_required_return),
|
pd.Series([res_a, res_b, res_c], index=ret.columns)
|
pandas.Series
|
from functools import partial
import itertools
import multiprocessing
import os
from pprint import pprint
import random
from typing import Sequence
import time
import click
import pandas as pd
import screed
from sourmash.logging import notify
# Divergence time estimates in millions of years
# from http://www.timetree.org/ on 2019-08-26
from orpheum.sequence_encodings import (
amino_keto_ize,
weak_strong_ize,
purine_pyrimidize,
encode_peptide,
)
MOLECULES_TO_COMPARE = (
"peptide20",
"hsdm17",
"sdm12",
"aa9",
"botvinnik8",
"dayhoff6",
"gbmr4",
"hp2",
)
divergence_estimates = pd.Series(
{
"Amniota": 312,
"Bilateria": 824,
"Boreoeutheria": 96,
# Old world monkeys
"Catarrhini": 29.4,
"Euarchontoglires": 76,
# Bony vertebrates
"Euteleostomi": 435,
"Eutheria": 105,
# Jawed vertebrates
"Gnathostomata": 473,
# A primate suborder
"Haplorrhini": 67,
# Great apes (includes orangutan)
"Hominidae": 15.8,
# Gorilla, human, chimp
"Homininae": 9.1,
# Apes (includes gibbons)
"Hominoidea": 20.2,
"Mammalia": 177,
"Opisthokonta": 1105,
"Primates": 74,
# tetrapods and the lobe-finned fishes
"Sarcopterygii": 413,
"Simiiformes": 43,
# Tetrapods - 4-limbed
"Tetrapoda": 352,
# Includes Eutheria (placental mammals) and
# Metatheria (marsupials)
"Theria": 159,
"NA": 0,
}
)
divergence_estimates = divergence_estimates.sort_values()
KSIZES = (
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
23,
24,
25,
)
COLUMNS = "id1", "id2", "ksize", "jaccard"
# Hydrophobic/hydrophilic mapping
# From: <NAME>., <NAME>., & <NAME>. (2018).
# Proteome-wide comparison between the amino acid composition of domains and
# linkers. BMC Research Notes, 1–6. http://doi.org/10.1186/s13104-018-3221-0
def sanitize_id(value):
"""Takes first non-whitespace as ID, replaces pipes with underscore
Cribbed from https://stackoverflow.com/a/295466/1628971
"""
value = value.split()[0].replace("|", "__")
return value
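# Quick illustration with a hypothetical FASTA-style ID:
#   sanitize_id("sp|P12345|GENE description text") -> "sp__P12345__GENE"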
def kmerize(seq, ksize):
"""Return the set of unique k-mers from the sequence"""
return set(seq[i : i + ksize] for i in range(len(seq) - ksize + 1))
def jaccardize(set1, set2):
"""Compute jaccard index of two sets"""
denominator = min(len(set1), len(set2))
if denominator > 0:
return len(set1.intersection(set2)) / denominator
else:
return denominator
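# Minimal sketch of how kmerize/jaccardize combine (toy strings, not real sequences):
#   kmerize("ABCD", 2)                     -> {"AB", "BC", "CD"}
#   jaccardize({"AB", "BC"}, {"BC", "CD"}) -> 0.5  (intersection over the smaller set)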
def kmerize_and_jaccard(seq1, seq2, ksize, debug=False):
kmers1 = set(seq1[i : i + ksize] for i in range(len(seq1) - ksize + 1))
kmers2 = set(seq2[i : i + ksize] for i in range(len(seq2) - ksize + 1))
jaccard = jaccardize(kmers1, kmers2)
if debug:
print("len(kmers1):", len(kmers1))
print("len(kmers2):", len(kmers2))
print(f"jaccard: {jaccard}")
return jaccard
def kmer_comparison_table(id1, seq1, id2, seq2, molecule_name, ksizes=KSIZES):
lines = []
for ksize in ksizes:
jaccard = kmerize_and_jaccard(seq1, seq2, ksize)
if jaccard > 0:
line = [id1, id2, ksize, jaccard]
lines.append(line)
else:
# If jaccard=0 at a small ksize, then all future jaccards will also
# be 0 --> break and exit
remaining_lines = [[id1, id2, k, 0] for k in range(ksize, max(ksizes) + 1)]
lines.extend(remaining_lines)
break
df = pd.DataFrame(lines, columns=COLUMNS)
df["alphabet"] = molecule_name
return df
def compare_peptide_seqs(
id1_seq1, id2_seq2, ksizes=KSIZES, alphabets=MOLECULES_TO_COMPARE
):
# Unpack the tuples
id1, seq1 = id1_seq1
id2, seq2 = id2_seq2
dfs = []
for alphabet in alphabets:
reencoded1 = encode_peptide(seq1, alphabet)
reencoded2 = encode_peptide(seq2, alphabet)
df = kmer_comparison_table(
id1, reencoded1, id2, reencoded2, molecule_name=alphabet, ksizes=ksizes
)
dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
return df
def compare_nucleotide_seqs(id1_seq1, id2_seq2, ksizes=KSIZES):
# Unpack the tuples
id1, seq1 = id1_seq1
id2, seq2 = id2_seq2
purine_pyrimidine1 = purine_pyrimidize(seq1)
purine_pyrimidine2 = purine_pyrimidize(seq2)
purine_pyrimidine_df = kmer_comparison_table(
id1,
purine_pyrimidine1,
id2,
purine_pyrimidine2,
molecule_name="purine_pyrimidine",
ksizes=ksizes,
)
weak_strong1 = weak_strong_ize(seq1)
weak_strong2 = weak_strong_ize(seq2)
weak_strong_df = kmer_comparison_table(
id1, weak_strong1, id2, weak_strong2, molecule_name="weak_strong", ksizes=ksizes
)
amino_keto1 = amino_keto_ize(seq1)
amino_keto2 = amino_keto_ize(seq2)
amino_keto_df = kmer_comparison_table(
id1, amino_keto1, id2, amino_keto2, molecule_name="amino_keto", ksizes=ksizes
)
nucleotide_df = kmer_comparison_table(
id1, seq1, id2, seq2, molecule_name="nucleotide", ksizes=ksizes
)
df = pd.concat(
[purine_pyrimidine_df, nucleotide_df, weak_strong_df, amino_keto_df],
ignore_index=True,
)
return df
def compare_seqs(id1_seq1, id2_seq2, ksizes=KSIZES, moltype="protein"):
if moltype == "protein":
return compare_peptide_seqs(id1_seq1, id2_seq2, ksizes)
elif moltype.lower() == "dna":
return compare_nucleotide_seqs(id1_seq1, id2_seq2, ksizes)
else:
raise ValueError(
f"{moltype} is not a valid molecule type! Only "
f"'protein' and 'dna' are supported"
)
def compare_args_unpack(args, ksizes, moltype):
"""Helper function to unpack the arguments. Written to use in pool.imap as
it can only be given one argument."""
return compare_seqs(*args, ksizes=ksizes, moltype=moltype)
def get_comparison_at_index(
index,
seqlist1,
seqlist2=None,
ksizes=KSIZES,
n_background=100,
moltype="protein",
verbose=False,
paired_seqlists=True,
intermediate_csv=False,
intermediate_parquet=False,
no_final_concatenation=False,
):
"""Returns similarities of all combinations of seqlist1 seqlist2 at index
Parameters
----------
index : int
Index of the sequence in seqlist1 to compare
seqlist1 : list
List of (id, seq) tuples
seqlist2 : list, optional (default None)
List of (id, seq) tuples. If None, then an all-by-all comparison of
sequences in seqlist1 is performed, as if seqlist1 was provided as
seqlist2.
ksizes : iterable of int
K-mer sizes to extract and compare the sequences on
moltype : str, optional (default "protein")
One of "protein" or "dna" -- for knowing which alphabets to use
verbose : boolean, default False
n_background : int, optional (default 100)
When paired_seqlists is True, how many random background sequences to
choose from seqlist2
paired_seqlists : bool, optional (default True)
If True, then seqlist1 and seqlist2 have sequences at the same index
that need to be compared, i.e. index 0 across the two. Best used when
seqlist1 and seqlist2 are lists of homologous protein sequences across
two different species
intermediate_parquet : bool
Write intermediate file of all comparisons at index i to an
IO-efficient parquet format
intermediate_csv : bool
Write intermediate file of all comparisons at index i to an
csv format
Returns
-------
comparison_df_list : list
list of pandas.DataFrame tables for the combinations of seqlist1 at
index, compared to seqlist2
"""
startt = time.time()
id1 = seqlist1[index][0]
id1_sanitized = sanitize_id(id1)
csv = id1_sanitized + ".csv"
parquet = id1_sanitized + ".parquet"
if os.path.exists(parquet):
notify(f"Found {parquet} already exists for {id1}, skipping", end="\r")
return []
if os.path.exists(csv):
notify(f"Found {csv} already exists for {id1}, skipping", end="\r")
return []
if seqlist2 is not None:
if paired_seqlists:
seq_iterator = get_paired_seq_iterator(
index, n_background, seqlist1, seqlist2, verbose
)
else:
seq_iterator = itertools.product([seqlist1[index]], seqlist2)
else:
seq_iterator = itertools.product([seqlist1[index]], seqlist1[index + 1 :])
func = partial(compare_args_unpack, ksizes=ksizes, moltype=moltype)
comparison_df_list = list(map(func, seq_iterator))
notify(
"comparison for index {} (id: {}) done in {:.5f} seconds",
index,
id1,
time.time() - startt,
end="\n",
)
if intermediate_csv or intermediate_parquet:
df =
|
pd.concat(comparison_df_list)
|
pandas.concat
|
import pandas as pd
import numpy as np
import datetime as dt
import math
# Inputs: H (holding period) and the source dataframe (read from file)
def cal_riskrt(H,source):
source=source.iloc[:,0:6]
source=source.drop(columns=["Unnamed: 0"])
source=source.set_index('date').dropna(subset=['long_rt','short_rt','long_short_rt'],how='all')
# Create a new dataframe to record the various metrics
df=pd.DataFrame(columns=['rt','volatility','mdd','sharpe','calmar'],index=['long','short','long_short','excess'])
# Compute metrics for the long portfolio
rt=pd.DataFrame(source['long_rt'])
rt['prod'] = np.cumprod(rt['long_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
# Annualized return
annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
# Annualized volatility
volatility = rt['long_rt'].std() * (math.sqrt(250 / H))
# Sharpe ratio
sharpe = annual_ret / volatility
# Maximum drawdown
rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['long','rt']=annual_ret
df.loc['long','volatility']=volatility
df.loc['long','mdd']=mdd
df.loc['long','sharpe']=sharpe
df.loc['long','calmar']=calmar
# Compute metrics for the short portfolio (control group)
rt = pd.DataFrame(source['short_rt'])
rt['short_rt']=rt['short_rt']
rt['prod'] = np.cumprod(rt['short_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
# Annualized return
annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
# Annualized volatility
volatility = rt['short_rt'].std() * (math.sqrt(250 / H))
# Sharpe ratio
sharpe = annual_ret / volatility
# Maximum drawdown
rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['short', 'rt'] = annual_ret
df.loc['short', 'volatility'] = volatility
df.loc['short', 'mdd'] = mdd
df.loc['short', 'sharpe'] = sharpe
df.loc['short', 'calmar'] = calmar
# Compute metrics for the long-short portfolio
rt = pd.DataFrame(source['long_short_rt'])
rt['long_short_rt'] = rt['long_short_rt']
rt['prod'] = np.cumprod(rt['long_short_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
# Annualized return
annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
# Annualized volatility
volatility = rt['long_short_rt'].std() * (math.sqrt(250 / H))
# Sharpe ratio
sharpe = annual_ret / volatility
# Maximum drawdown
rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['long_short', 'rt'] = annual_ret
df.loc['long_short', 'volatility'] = volatility
df.loc['long_short', 'mdd'] = mdd
df.loc['long_short', 'sharpe'] = sharpe
df.loc['long_short', 'calmar'] = calmar
# Compute metrics for excess returns (long minus benchmark)
rt = pd.DataFrame(source['long_rt']-source['benchmark'])
rt.columns=['excess_rt']
rt['prod'] = np.cumprod(rt['excess_rt'] + 1)
holding_period = pd.to_datetime(rt.index.values[-1]) - pd.to_datetime(rt.index.values[0])
# Annualized return
annual_ret = pow(rt['prod'][-1], 365 / holding_period.days) - 1
# Annualized volatility
volatility = rt['excess_rt'].std() * (math.sqrt(250 / H))
# Sharpe ratio
sharpe = annual_ret / volatility
# Maximum drawdown
rt['max2here'] = rt['prod'].expanding(1).max()
rt['dd2here'] = (rt['prod'] / rt['max2here']) - 1
mdd = rt['dd2here'].min()
calmar = annual_ret / abs(mdd)
df.loc['excess', 'rt'] = annual_ret
df.loc['excess', 'volatility'] = volatility
df.loc['excess', 'mdd'] = mdd
df.loc['excess', 'sharpe'] = sharpe
df.loc['excess', 'calmar'] = calmar
return df
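# The four blocks above repeat the same computation; a possible refactor (illustrative
# sketch only, not used by the code below) would factor it into a helper such as:
#   def _perf_stats(returns, H):
#       prod = np.cumprod(returns + 1)
#       days = (pd.to_datetime(returns.index[-1]) - pd.to_datetime(returns.index[0])).days
#       annual_ret = pow(prod[-1], 365 / days) - 1
#       volatility = returns.std() * math.sqrt(250 / H)
#       mdd = (prod / prod.expanding(1).max() - 1).min()
#       return annual_ret, volatility, mdd, annual_ret / volatility, annual_ret / abs(mdd)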
rt_df=pd.read_csv("../draw/inv_level_H30.csv")
risk_rt=cal_riskrt(20,rt_df)
risk_rt.to_csv("inv_level.csv")
rt_df=
|
pd.read_csv("../draw/warehouseR90H5.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def testinfer_dtype_from_scalar(self):
# Test that infer_dtype_from_scalar is returning correct dtype for int
# and float.
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'm8[ns]'
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
for freq in ['M', 'D']:
p = Period('2011-01-01', freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
assert dtype == 'period[{0}]'.format(freq)
assert val == p.ordinal
dtype, val = infer_dtype_from_scalar(p)
assert dtype == np.object_
assert val == p
# misc
for data in [date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
def testinfer_dtype_from_scalar_errors(self):
with pytest.raises(ValueError):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[('foo', np.object_, False),
(b'foo', np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ''], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(pd.Categorical(list('aabc')), np.object_, False),
(pd.Categorical([1, 2, 3]), np.int64, False),
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
(np.datetime64('2016-01-01'), np.dtype('<M8[D]'), False),
(pd.date_range('20160101', periods=3),
np.dtype('<M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
(pd.Series(list('abc')), np.object_, False),
(pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
'datetime64[ns, US/Eastern]', True)])
def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
def test_cast_scalar_to_array(self):
arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
exp = np.ones((3, 2), dtype=np.int64)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((3, 2), 1.1)
exp = np.empty((3, 2), dtype=np.float64)
exp.fill(1.1)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
exp = np.empty((2, 3), dtype='datetime64[ns]')
exp.fill(np.datetime64('2011-01-01'))
tm.assert_numpy_array_equal(arr, exp)
# pandas dtype is stored as object dtype
obj = Timestamp('2011-01-01', tz='US/Eastern')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
obj = Period('2011-01-01', freq='D')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
tm.assert_numpy_array_equal(arr, exp)
class TestMaybe(object):
def test_maybe_convert_string_to_array(self):
result = maybe_convert_string_to_object('x')
tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
assert result.dtype == object
result = maybe_convert_string_to_object(1)
assert result == 1
arr = np.array(['x', 'y'], dtype=str)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# unicode
arr = np.array(['x', 'y']).astype('U')
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
assert result.dtype == object
# object
arr = np.array(['x', 2], dtype=object)
result = maybe_convert_string_to_object(arr)
tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
assert result.dtype == object
def test_maybe_convert_scalar(self):
# pass thru
result = maybe_convert_scalar('x')
assert result == 'x'
result = maybe_convert_scalar(np.array([1]))
assert result == np.array([1])
# leave scalar dtype
result = maybe_convert_scalar(np.int64(1))
assert result == np.int64(1)
result = maybe_convert_scalar(np.int32(1))
assert result == np.int32(1)
result = maybe_convert_scalar(np.float32(1))
assert result == np.float32(1)
result = maybe_convert_scalar(np.float64(1))
assert result == np.float64(1)
# coerce
result = maybe_convert_scalar(1)
assert result == np.int64(1)
result = maybe_convert_scalar(1.0)
assert result == np.float64(1)
result = maybe_convert_scalar(Timestamp('20130101'))
assert result == Timestamp('20130101').value
result = maybe_convert_scalar(datetime(2013, 1, 1))
assert result == Timestamp('20130101').value
result = maybe_convert_scalar(Timedelta('1 day 1 min'))
assert result ==
|
Timedelta('1 day 1 min')
|
pandas.Timedelta
|
from covid_tracker.calculate_stat_summary import calculate_stat_summary
from covid_tracker.get_covid_data import get_covid_data
import pandas as pd
import pytest
def test_calculate_stat_summary():
"""Test the calculate_stat_summary() function"""
# data_type='cases'
input = get_covid_data()
# Test output type
assert isinstance(calculate_stat_summary(input, 'cases'), pd.DataFrame)
# Test output size
assert calculate_stat_summary(input, 'cases').shape == (14, 12)
# Test output column names
assert 'date_report' in calculate_stat_summary(input, 'cases').columns
assert 'province' in calculate_stat_summary(input, 'cases').columns
assert 'cases' in calculate_stat_summary(input, 'cases').columns
# data_type='mortality'
input = get_covid_data(data_type='mortality')
# Test output type
assert isinstance(calculate_stat_summary(input, 'deaths'), pd.DataFrame)
# Test output size
assert calculate_stat_summary(input, 'deaths').shape == (14, 12)
# Test output column names
assert 'date_death_report' in calculate_stat_summary(input, 'deaths').columns
assert 'province' in calculate_stat_summary(input, 'deaths').columns
assert 'deaths' in calculate_stat_summary(input, 'deaths').columns
def test_calculate_stat_summary_errors():
"""Test that calculate_stat_summary() raises the correct errors"""
input = get_covid_data()
# Tests that TypeErrors are raised when arguments are not the right type
with pytest.raises(TypeError):
calculate_stat_summary('2021-12-31', 'cases')
calculate_stat_summary(100, 'death')
calculate_stat_summary(input, 2)
calculate_stat_summary(input, 'province')
# Tests that ValueErrors are raised when arguments have invalid values
with pytest.raises(ValueError):
calculate_stat_summary(pd.DataFrame(columns=["a", "b"]), 'cases')
calculate_stat_summary(pd.DataFrame({"a": [1], "b":[2]}), 'deaths')
calculate_stat_summary(pd.DataFrame(input, 'new'))
calculate_stat_summary(
|
pd.DataFrame({"a": [1], "b":[2]})
|
pandas.DataFrame
|
import datetime
import pymongo
import pandas as pd
import argparse
import sys
from pandas.io.json import json_normalize
import os
import matplotlib.pyplot as plt
mongo_details = {'address': None, 'auth': True, 'project_name': 'ConspiracyTheoriesUA', 'col_name': 'preNov2020',
'user': input('Enter the mongo username: '), 'password': input('Enter the mongo password: ')}
date_criteria = {'start_date': None, # Date should be entered in YYYY-MM-DD format. Ex: 2019-07-01 for July 1 2019
'end_date': None} # If you don't want a specific start or end date for data, set start_date or end_date to None
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output-file", type=str, required=False, help="The path (including .xlsx) for the output file. If not specified, the default is the timestamp for when the data was pulled.")
args = vars(ap.parse_args())
if args['output_file']:
output_file_name = 'data/' + args['output_file']
elif not args['output_file']:
output_file_name = 'data/preNov2020_retweets_w_userid/' + str(datetime.datetime.now().replace(microsecond=0)).replace(' ', 'T') + '.xlsx'
os.makedirs(os.path.dirname(output_file_name), exist_ok=True)
def build_mongo_connection():
print(str(datetime.datetime.now().replace(microsecond=0)) + " Building mongo connection")
mongoClient = pymongo.MongoClient(mongo_details['address'])
mongoClient.admin.authenticate(mongo_details['user'], mongo_details['password'])
databases = mongoClient.database_names()
project_dbs = [f for f in databases if mongo_details['project_name'] in f]
if len(project_dbs) == 0:
print('No databases found for the specified project. Is the project name in data_pull_config.py correct?')
sys.exit()
elif len(project_dbs) > 2:
print('The specified project name returns too many results. Is the project name in data_pull_config.py correct?')
sys.exit()
project_config_db = [f for f in project_dbs if 'Config' in f][0]
project_config_db = mongoClient[project_config_db]['config']
project_data_db = [f for f in project_dbs if '_' in f][0]
project_data_db = mongoClient[project_data_db][mongo_details['col_name']]
return project_config_db, project_data_db
project_config_db, project_data_db = build_mongo_connection()
aggregation_pipeline = [{'$match': {'retweeted_status': {'$exists': True}}}, {'$group': {'_id': '$retweeted_status.id_str', 'count': {'$sum': 1}, 'rt_account': {'$first': '$retweeted_status.user.id'}, 'rt_info': {'$push': {'account': '$user.id', 'rt_time': '$created_ts'}}}}]
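# The pipeline keeps only retweets, groups them by the original tweet's id_str, counts
# retweets per tweet, records the retweeted account, and collects (retweeting account,
# retweet time) pairs into rt_info.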
rt_count = list(project_data_db.aggregate(aggregation_pipeline, allowDiskUse=True))
rt_count = json_normalize(rt_count)
rt_count['_id'] = "ID_" + rt_count['_id']
simple_rt_file = output_file_name.replace(os.path.basename(output_file_name), 'simple_rt_count_info.csv')
simple_rt_count = rt_count[['_id', 'count', 'rt_account']]
simple_rt_count.to_csv(simple_rt_file, index=False)
output_folder = 'metadata_pulls/data/preNov2020_retweets_w_id/counts/'
os.makedirs(output_folder, exist_ok=True)
for idx in rt_count.index:
rt_row = rt_count.loc[idx]
rt_info = rt_row['rt_info']
rt_info = json_normalize(rt_info)
try:
rt_info_file = output_folder + str(rt_row['_id']) + '.csv'
rt_info.to_csv(rt_info_file, index=False)
except OSError as e:
# This makes a new folder when the number of files in a folder exceeds the limit
print(e)
output_folder = output_folder.replace('counts/', 'counts2/')
os.makedirs(output_folder, exist_ok=True)
rt_info_file = output_folder + str(rt_row['_id']) + '.csv'
rt_info.to_csv(rt_info_file, index=False)
##############################
#
# The next section makes viz
#
##############################
rt_count = simple_rt_count['count'].value_counts()
rt_count.sort_index(inplace=True)
account_rt_count = simple_rt_count.groupby(by='rt_account').agg({'count': 'sum'})
account_rt_count.sort_values(by='count', ascending=False, inplace=True)
count_account_rt_count = account_rt_count['count'].value_counts()
count_account_rt_count.sort_index(inplace=True)
"""
First up, a scatter plot of the number of retweets per tweet
"""
fig = plt.figure(figsize=(6,4))
plot1 = fig.add_subplot(111)
plot1.set_yscale('log')
scatter = plt.scatter(x=rt_count.index, y = rt_count, s=.5)
plot1.set_xlim(0,30000)
plot1.set_xlabel('Number of retweets')
plot1.set_ylabel('Number of tweets retweeted this many times')
#plt.show()
plt.savefig('retweets_per_tweet.pdf')
"""
Next, a scatter plot of the number of retweets of accounts
"""
fig = plt.figure()
plot1 = fig.add_subplot(111)
scatter = plt.scatter(x=count_account_rt_count.index, y = count_account_rt_count, s=.5)
plot1.set_xlabel('Number of retweets')
plot1.set_ylabel('Number of accounts retweeted this many times')
#plt.show()
plt.savefig('retweets_of_accounts.pdf')
##############################
#
# To figure out how many times an account retweets others, we need to transform the network.
# This is probably excessive, but it's the approach we used.
#
##############################
del simple_rt_count, rt_count
print("{} Reading {}.".format(datetime.datetime.now(), simple_rt_file))
rt_summary_data = pd.read_csv(simple_rt_file)
rt_summary_data.set_index('_id', inplace=True)
rt_summary_data.drop(columns='count', inplace=True)
folders = ['counts', 'counts2']
print("{} Building file list.".format(datetime.datetime.now()))
all_files = []
for folder in folders:
folder_files = os.listdir(folder)
folder_files = [folder + '/' + f for f in folder_files]
folder_files = [f for f in folder_files if 'ID_' in f]
all_files.extend(folder_files)
retweet_matrix = {}
print("{} Building retweet matrix.".format(datetime.datetime.now()))
file_counter = 0
number_of_file = len(all_files)
for file in all_files:
file_counter += 1
file_contents = pd.read_csv(file)
if file_counter % 10000 == 0:
print("{} Processing {} of {} retweet files.".format(datetime.datetime.now(), file_counter, number_of_file))
retweeted_account = str(rt_summary_data.loc[os.path.basename(file).replace('.csv','')]['rt_account'])
if not retweet_matrix.get(retweeted_account):
retweet_matrix[retweeted_account] = {}
accounts_that_retweeted = file_contents['account']
retweeted_account_retweeters = retweet_matrix[retweeted_account]
accounts_that_retweeted = [str(account) for account in accounts_that_retweeted]
for account in accounts_that_retweeted:
if account in retweeted_account_retweeters:
retweeted_account_retweeters[account] += 1
else:
retweeted_account_retweeters[account] = 1
retweet_matrix[retweeted_account] = retweeted_account_retweeters
retweet_network_info_list = []
to_account_counter = 0
num_to_accounts = len(retweet_matrix)
for to_account in retweet_matrix:
to_account_counter += 1
from_accounts = retweet_matrix[to_account]
for from_account in from_accounts:
from_account_number = from_accounts[from_account]
df_row = [{'RT': from_account, 'Org': to_account}]
df_rows = [info for info in df_row for idx in range(from_account_number)]
retweet_network_info_list.extend(df_rows)
if to_account_counter % 10000 == 0:
print("{} {} of {} to-accounts processed".format(datetime.datetime.now(), to_account_counter, num_to_accounts))
retweet_network_df =
|
pd.DataFrame(retweet_network_info_list)
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/6/29 16:32
Desc:
"""
import pandas as pd
import requests
def fund_em_aum() -> pd.DataFrame:
"""
Eastmoney (东方财富) - Funds - fund company ranking list
http://fund.eastmoney.com/Company/lsgm.html
:return: fund company ranking list
:rtype: pandas.DataFrame
"""
url = 'http://fund.eastmoney.com/Company/home/gspmlist'
params = {
'fundType': '0'
}
r = requests.get(url, params=params)
temp_df = pd.read_html(r.text)[0]
del temp_df['相关链接']
del temp_df['天相评级']
temp_df.columns = ['序号', '基金公司', '成立时间', '全部管理规模', '全部基金数', '全部经理数']
expanded_df = temp_df['全部管理规模'].str.split(' ', expand=True)
temp_df['全部管理规模'] = expanded_df.iloc[:, 0].str.replace(",", "")
temp_df['更新日期'] = expanded_df.iloc[:, 1]
temp_df['全部管理规模'] = pd.to_numeric(temp_df['全部管理规模'], errors="coerce")
temp_df['全部基金数'] = pd.to_numeric(temp_df['全部基金数'])
temp_df['全部经理数'] = pd.to_nume
|
ric(temp_df['全部经理数'])
|
pandas.to_numeric
|
from os.path import exists, expanduser
import pandas as pd
import time
class ExecExcel:
"""
read xlsx and csv
"""
def __init__(self, file_path):
self.file_path = expanduser(file_path)
def read(self, sheet='Sheet1', axis=0, index_col=None, **kwargs):
df = pd.ExcelFile(self.file_path)
sheets = [sheet] if sheet else df.sheet_names
df_parse = df.parse(sheets, index_col=index_col, **kwargs)
frame_data = pd.concat(df_parse, axis=axis)
return ExcelResponse(frame_data, self.file_path)
def read_csv(self, **kwargs):
frame_data = pd.read_csv(self.file_path, **kwargs)
return ExcelResponse(frame_data, self.file_path)
def data_format(self, data: list, axis=0):
"""
Build a DataFrame from row data for writing to excel.
:param axis: axis along which the per-row frames are concatenated
:param data: list of dicts, one dict per row
"""
fd = [pd.DataFrame(item, index=[0]) for item in data]
frame_data = pd.concat(fd, axis=axis)
return ExcelResponse(frame_data, self.file_path)
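# Example of data_format (hypothetical rows; returns an ExcelResponse wrapping the frame):
#   ExecExcel("~/out.xlsx").data_format([{"a": 1, "b": 2}, {"a": 3, "b": 4}])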
def append_row(self, data: list, sheet='Sheet1', axis=0, index_col=None, **kwargs):
if exists(self.file_path):
df = pd.ExcelFile(self.file_path)
sheets = [sheet] if sheet else df.sheet_names
df_parse = df.parse(sheets, index_col=index_col, **kwargs)
frame_data = pd.concat(df_parse, axis=axis)
else:
frame_data = pd.DataFrame()
new_data = pd.concat([pd.DataFrame(item, index=[0]) for item in data], axis=axis)
frame_data =
|
pd.concat([frame_data, new_data], axis=axis)
|
pandas.concat
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/21 0021
# @Author : justin.郑 <EMAIL>
# @File : index_baidu.py
# @Desc : Fetch Baidu Index data
import json
import urllib.parse
import pandas as pd
import requests
def decrypt(t: str, e: str) -> str:
"""
Decryption function: maps each character of e through a substitution key built from t
:param t:
:type t:
:param e:
:type e:
:return:
:rtype:
"""
n, i, a, result = list(t), list(e), {}, []
ln = int(len(n) / 2)
start, end = n[ln:], n[:ln]
a = dict(zip(end, start))
return "".join([a[j] for j in e])
def get_ptbk(uniqid: str, cookie: str) -> str:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cookie": cookie,
"Host": "index.baidu.com",
"Referer": "http://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
with session.get(
url=f"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}"
) as response:
ptbk = response.json()["data"]
return ptbk
def baidu_interest_index(word, cookie):
"""
Baidu Index - audience profile: interest distribution
:param word: keyword
:param cookie:
:return:
desc interest category
tgi TGI index
word_rate keyword distribution ratio
all_rate whole-network distribution ratio
period period range
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/interest?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['interest']
age_df = pd.DataFrame(age_list)
all_list = data['result'][1]['interest']
all_df = pd.DataFrame(all_list)
all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
res_df = pd.merge(age_df, all_df, on='desc')
res_df['period'] = period
res_df.drop(["typeId"], axis=1, inplace=True)
res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
return res_df
except:
return None
def baidu_gender_index(word, cookie):
"""
Baidu Index - audience profile: gender distribution
:param word: keyword
:param cookie:
:return:
desc gender
tgi TGI index
word_rate keyword distribution ratio
all_rate whole-network distribution ratio
period period range
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/baseAttributes?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['gender']
age_df = pd.DataFrame(age_list)
all_list = data['result'][1]['gender']
all_df = pd.DataFrame(all_list)
all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
res_df = pd.merge(age_df, all_df, on='desc')
res_df['period'] = period
res_df.drop(["typeId"], axis=1, inplace=True)
res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
return res_df
except:
return None
def baidu_age_index(word, cookie):
"""
Baidu Index - audience profile: age distribution
:param word: keyword
:param cookie:
:return:
desc age range
tgi TGI index
word_rate keyword distribution ratio
all_rate whole-network distribution ratio
period period range
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/baseAttributes?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['age']
age_df = pd.DataFrame(age_list)
all_list = data['result'][1]['age']
all_df = pd.DataFrame(all_list)
all_df.drop(["tgi", "typeId"], axis=1, inplace=True)
res_df = pd.merge(age_df, all_df, on='desc')
res_df['period'] = period
res_df.drop(["typeId"], axis=1, inplace=True)
res_df.rename(columns={'rate_x': 'word_rate', 'rate_y': 'all_rate'}, inplace=True)
return res_df
except:
return None
def baidu_atlas_index(word, cookie, date=None):
"""
Baidu Index - demand graph (related-word map)
:param word: keyword
:param cookie:
:param date: period
:return:
period period range
word related word
pv search popularity
ratio search change rate
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
if date == None:
date = ""
url = "http://index.baidu.com/api/WordGraph/multi?wordlist[]=%s&datelist=%s" % (word, date)
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
wordlist = data['wordlist'][0]['wordGraph']
res_list = []
for word in wordlist:
tmp = {
"word": word['word'],
"pv": word['pv'],
"ratio": word['ratio'],
"period": data['period']
# "sim": word['sim']
}
res_list.append(tmp)
df =
|
pd.DataFrame(res_list)
|
pandas.DataFrame
|
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
|
tm.assert_frame_equal(expected, result)
|
pandas._testing.assert_frame_equal
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
import pandas as pd
import pandas.testing as pdtest
from allfreqs import AlleleFreqs
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.tests.constants import (
REAL_ALG_X_FASTA, REAL_ALG_X_NOREF_FASTA, REAL_RSRS_FASTA,
REAL_ALG_L6_FASTA, REAL_ALG_L6_NOREF_FASTA,
SAMPLE_MULTIALG_FASTA, SAMPLE_MULTIALG_NOREF_FASTA, SAMPLE_REF_FASTA,
SAMPLE_MULTIALG_CSV, SAMPLE_MULTIALG_NOREF_CSV, SAMPLE_REF_CSV,
sample_sequences_df, SAMPLE_SEQUENCES_DICT, sample_sequences_freqs,
sample_sequences_freqs_amb, SAMPLE_FREQUENCIES,
SAMPLE_FREQUENCIES_AMB, REAL_ALG_X_DF, REAL_X_FREQUENCIES, REAL_ALG_L6_DF,
REAL_L6_FREQUENCIES, TEST_CSV
)
class TestBasic(unittest.TestCase):
def setUp(self) -> None:
ref = Reference("AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT")
alg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
self.af = AlleleFreqs(multialg=alg, reference=ref)
self.af_amb = AlleleFreqs(multialg=alg, reference=ref, ambiguous=True)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_frequencies_ambiguous(self):
# Given/When
exp_freqs = sample_sequences_freqs_amb()
# Then
pdtest.assert_frame_equal(self.af_amb.frequencies, exp_freqs)
def test__get_frequencies(self):
# Given
test_freq = pd.Series({'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3,
'-': 0.1, 'N': 0.1})
exp_freq = {'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3, 'gap': 0.1,
'oth': 0.1}
# When
result = self.af._get_frequencies(test_freq)
# Then
self._dict_almost_equal(result, exp_freq)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected =
|
pd.read_csv(SAMPLE_FREQUENCIES)
|
pandas.read_csv
|
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.backtesting import bt_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
EMPTY_DF = pd.DataFrame()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"queue, expected",
[
(["ema", "help"], []),
(["quit", "help"], ["help"]),
],
)
def test_menu_with_queue(expected, mocker, queue):
mocker.patch(
target=(
"openbb_terminal.stocks.backtesting.bt_controller."
"BacktestingController.switch"
),
return_value=["quit"],
)
result_menu = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=queue,
).menu()
assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
# ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
mocker.patch(
target="openbb_terminal.feature_flags.USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.parent_classes.session",
)
mocker.patch(
target="openbb_terminal.parent_classes.session.prompt",
return_value="quit",
)
# DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
mocker.patch.object(
target=bt_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=True,
)
mocker.patch(
target="openbb_terminal.stocks.backtesting.bt_controller.session",
)
mocker.patch(
target="openbb_terminal.stocks.backtesting.bt_controller.session.prompt",
return_value="quit",
)
result_menu = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"mock_input",
["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
# DISABLE AUTO-COMPLETION
mocker.patch.object(
target=bt_controller.obbff,
attribute="USE_PROMPT_TOOLKIT",
new=False,
)
mocker.patch(
target="openbb_terminal.stocks.backtesting.bt_controller.session",
return_value=None,
)
# MOCK USER INPUT
mocker.patch("builtins.input", return_value=mock_input)
# MOCK SWITCH
class SystemExitSideEffect:
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.first_call = False
raise SystemExit()
return ["quit"]
mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
mocker.patch(
target=(
"openbb_terminal.stocks.backtesting.bt_controller."
"BacktestingController.switch"
),
new=mock_switch,
)
result_menu = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=None,
).menu()
assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
)
controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"an_input, expected_queue",
[
("", []),
("/help", ["home", "help"]),
("help/help", ["help", "help"]),
("q", ["quit"]),
("h", []),
("r", ["quit", "quit", "reset", "stocks", "load TSLA", "bt"]),
],
)
def test_switch(an_input, expected_queue):
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
queue=None,
)
queue = controller.switch(an_input=an_input)
assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
mocker.patch("os.system")
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=pd.DataFrame(),
)
controller.call_cls([])
assert not controller.queue
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
[
"quit",
"quit",
"quit",
],
),
("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
("call_home", [], ["quit", "quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
["quit", "quit", "reset", "stocks", "load TSLA", "bt"],
),
(
"call_reset",
["help"],
["quit", "quit", "reset", "stocks", "load TSLA", "bt", "help"],
),
],
)
def test_call_func_expect_queue(expected_queue, queue, func):
controller = bt_controller.BacktestingController(
ticker="TSLA",
stock=
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 <NAME>
pySME is a Python script to run the R SME package
(https://cran.r-project.org/web/packages/sme/index.html). The SME package generates
smoothing-splines mixed-effects models from metabolomics data. This script
follows the methodology given by Berk et al. (2011) and uses bootstrapping to
approximate p-values. Running this script requires R with the SME package installed.
"""
import os
import numpy as np
from scipy import interpolate
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
import statsmodels.stats.multitest as smm
import time
import copy
import smeutils
smePack = importr('sme', lib_loc="C:/Users/user/Documents/R/win-library/3.6")
statsPack = importr('stats')
# Settings ====================================================================
# Input files
info = pd.read_csv('./sme_info.csv')
data = pd.read_csv('./sme_data.csv')
info_arr = np.array(info)
data_fid = np.array(data.columns)
data_arr = np.array(data)
selIdx = np.arange(len(data_fid))
# Parameters
RUN = True
N = 12 # Number of subjects
t_n = 4 # Number of time points
iplN = 100 # Number of interpolated time points
n_bootstrap = 500 # Number of bootstrap sampling
selIdx = selIdx[:] # List of metabolites to analyze
relative = False # Scale data to initial values
correctOutlier = False
SAVE = False
USEMEAN = True
# SME Parameters
ctra = "AICc" # Criteria
init_l_mc = 1e-8        # Initial lambda_mu (control group)
init_l_vc = 1e-8        # Initial lambda_v (control group)
init_l_mt = 5e-8        # Initial lambda_mu (treatment group)
init_l_vt = 5e-8        # Initial lambda_v (treatment group)
maxIter = 100000 # Maximum iteration
deltaEM = 1e-3          # Threshold for expectation maximization
deltaNM = 1e-3          # Threshold for Nelder-Mead
normalizeTime = True
seed = 1234 # RNG seed
showFig = False # Flag to plot figures
figSize = (20,16) # Size of figures
plotLegend = False # Flag to plot legend
colorMap = 'viridis' # kwarg for colormap
plotSMEMeanOnly = False # Only plot SME mean trace
mergePlot = True # Merge multiple plots
plotHeatmap = False # Plot heatmap comparing two data groups
t = np.array([1,3,5,7])
iplT = np.linspace(1, 7, iplN)
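# Indices of the original time points t inside the interpolated grid iplT; this relies on the values
# of t falling exactly on the linspace grid so the fitted curves can be read back at the sampled times.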
iplTIdx = np.empty(t_n)
for i in range(t_n):
iplTIdx[i] = np.where(iplT == t[i])[0]
iplTIdx = iplTIdx.astype(int)
sel = np.array([data_fid[selIdx]]).flatten()
#==============================================================================
np.random.seed(seed) # Set seed
#==============================================================================
if relative:
data = smeutils.normalizeData(data, N, t_n, data_fid)
#==============================================================================
t0 = time.time()
fulldataRaw = pd.concat([info,data], axis=1)
fulldataRaw = fulldataRaw.astype('float64')
fulldata = copy.deepcopy(fulldataRaw)
fulldata = fulldata.drop(fulldata.index[16]) # ind 5 has an outlier
if correctOutlier:
fulldata = smeutils.correctOutlier(fulldata, sel, t, t_n)
# Initialize ==================================================================
grp0_f = fulldata[(fulldata.grp == 0)]['ind']
grp1_f = fulldata[(fulldata.grp == 1)]['ind']
grp0 = np.unique(fulldata[(fulldata.grp == 0)]['ind'])
grp1 = np.unique(fulldata[(fulldata.grp == 1)]['ind'])
pandas2ri.activate()
fd_ri = pandas2ri.py2ri(fulldata)
fd_rigrp0 = fd_ri.rx(fd_ri.rx2("grp").ro == 0, True)
fd_rigrp1 = fd_ri.rx(fd_ri.rx2("grp").ro == 1, True)
fd_rigrp0tme = fd_rigrp0.rx2("tme")
fd_rigrp0ind = fd_rigrp0.rx2("ind")
fd_rigrp1tme = fd_rigrp1.rx2("tme")
fd_rigrp1ind = fd_rigrp1.rx2("ind")
ys0mu = np.empty((len(sel), iplN))
ys1mu = np.empty((len(sel), iplN))
ys0vHat = np.empty((len(sel), len(grp0), iplN))
ys1vHat = np.empty((len(sel), len(grp1), iplN))
l2 = np.empty(len(sel))
se = np.empty(len(sel))
se0 = np.empty((len(sel), len(grp0)))
se1 = np.empty((len(sel), len(grp1)))
sem = np.empty(len(sel))
tval = np.empty(len(sel))
ys0v = np.empty((len(sel), len(grp0), t_n))
ys1v = np.empty((len(sel), len(grp1), t_n))
ys0eta = np.empty((len(sel), len(grp0), t_n))
ys1eta = np.empty((len(sel), len(grp1), t_n))
ys0mubs = np.empty((n_bootstrap, len(sel), iplN))
ys1mubs = np.empty((n_bootstrap, len(sel), iplN))
ys0vHatbs = np.empty((n_bootstrap, len(sel), len(grp0), iplN))
ys1vHatbs = np.empty((n_bootstrap, len(sel), len(grp1), iplN))
l2bs = np.empty((n_bootstrap, len(sel)))
sebs = np.empty((n_bootstrap, len(sel)))
se0bs = np.empty((n_bootstrap, len(sel), len(grp0)))
se1bs = np.empty((n_bootstrap, len(sel), len(grp1)))
sembs = np.empty((n_bootstrap, len(sel)))
tvalbs = np.empty((n_bootstrap, len(sel)))
pval = np.empty(len(sel))
t1 = time.time()
print(t1 - t0)
# SME =========================================================================
if RUN:
for m_i in range(len(sel)):
fd_rigrp0obj = fd_rigrp0.rx2(sel[m_i])
fd_rigrp1obj = fd_rigrp1.rx2(sel[m_i])
fit0 = smePack.sme(fd_rigrp0obj,
fd_rigrp0tme,
fd_rigrp0ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mc,
                           initial_lambda_v=init_l_vc,
normalizeTime=normalizeTime)
fit1 = smePack.sme(fd_rigrp1obj,
fd_rigrp1tme,
fd_rigrp1ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mt,
initial_lambda_v=init_l_vt,
normalizeTime=normalizeTime)
fit0coef = np.array(fit0.rx2('coefficients'))
fit1coef = np.array(fit1.rx2('coefficients'))
spl0mu = interpolate.CubicSpline(t, fit0coef[0], bc_type='natural')
ys0mu[m_i] = spl0mu(iplT)
spl1mu = interpolate.CubicSpline(t, fit1coef[0], bc_type='natural')
ys1mu[m_i] = spl1mu(iplT)
l2[m_i] = np.sqrt(np.trapz(np.square(ys0mu[m_i] - ys1mu[m_i]), x=iplT))
for g0 in range(len(grp0)):
spl0 = interpolate.CubicSpline(t, fit0coef[g0 + 1] + fit0coef[0], bc_type='natural')
ys0vHat[m_i][g0] = spl0(iplT)
ys0v[m_i][g0] = ys0mu[m_i][iplTIdx] - ys0vHat[m_i][g0][iplTIdx]
ys0eta[m_i][g0] = fulldataRaw.loc[fulldataRaw.ind == grp0[g0], sel[m_i]] - ys0vHat[m_i][g0][iplTIdx]
se0[m_i][g0] = np.trapz(np.square(ys0mu[m_i] - ys0vHat[m_i][g0]), x=iplT)
for g1 in range(len(grp1)):
spl1 = interpolate.CubicSpline(t, fit1coef[g1 + 1] + fit1coef[0], bc_type='natural')
ys1vHat[m_i][g1] = spl1(iplT)
ys1v[m_i][g1] = ys1mu[m_i][iplTIdx] - ys1vHat[m_i][g1][iplTIdx]
ys1eta[m_i][g1] = fulldataRaw.loc[fulldataRaw.ind == grp1[g1], sel[m_i]] - ys1vHat[m_i][g1][iplTIdx]
se1[m_i][g1] = np.trapz(np.square(ys1mu[m_i] - ys1vHat[m_i][g1]), x=iplT)
se[m_i] = np.sqrt(np.mean(se0[m_i])/len(grp0) + np.mean(se1[m_i])/len(grp1))
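    # Test statistic per metabolite: L2 distance between the two fitted mean curves (l2), scaled by the
    # pooled standard error of the individual curves (se); sem is kept as a placeholder and set to 0.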
sem = 0.
tval = np.divide(l2, se + sem)
ys0vFlat = ys0v.reshape((ys0v.shape[0], -1))
ys0etaFlat = ys0eta.reshape((ys0eta.shape[0], -1))
ys0etaFlat = np.delete(ys0etaFlat, 13, 1) # ind 5 has an outlier
ys1vFlat = ys1v.reshape((ys1v.shape[0], -1))
ys1etaFlat = ys1eta.reshape((ys1eta.shape[0], -1))
t2 = time.time()
print(t2 - t1)
# Bootstrapping ===============================================================
fulldataS = []
for bcount in range(n_bootstrap):
print("Bootstrap run: " + str(bcount))
fulldataC = copy.deepcopy(fulldataRaw)
for m_i in range(len(sel)):
if USEMEAN:
for Di in range(N):
ysmuMean = (ys0mu[m_i][iplTIdx] + ys1mu[m_i][iplTIdx])/2
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ysmuMean
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ysmuMean
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
else:
ct_rand = np.random.rand()
for Di in range(N):
if ct_rand < 0.5:
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys0mu[m_i][iplTIdx]
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys0mu[m_i][iplTIdx]
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
else:
if Di in grp0:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys1mu[m_i][iplTIdx]
+ np.random.choice(ys0vFlat[m_i], size=t_n)
+ np.random.choice(ys0etaFlat[m_i], size=t_n))
else:
fulldataC[sel[m_i]][np.arange(0,t_n*N,N)+Di] = (ys1mu[m_i][iplTIdx]
+ np.random.choice(ys1vFlat[m_i], size=t_n)
+ np.random.choice(ys1etaFlat[m_i], size=t_n))
fulldataC = fulldataC.drop(fulldataC.index[16]) # ind 5 has an outlier
fulldataS.append(fulldataC)
fd_ri = pandas2ri.py2ri(fulldataC)
fd_rigrp0 = fd_ri.rx(fd_ri.rx2("grp").ro == 0, True)
fd_rigrp1 = fd_ri.rx(fd_ri.rx2("grp").ro == 1, True)
for m_i in range(len(sel)):
fd_rigrp0objbs = fd_rigrp0.rx2(sel[m_i])
fd_rigrp1objbs = fd_rigrp1.rx2(sel[m_i])
fit0 = smePack.sme(fd_rigrp0objbs,
fd_rigrp0tme,
fd_rigrp0ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mc,
initial_lambda_v=init_l_vc,
normalizeTime=normalizeTime)
fit1 = smePack.sme(fd_rigrp1objbs,
fd_rigrp1tme,
fd_rigrp1ind,
criteria=ctra,
maxIter=maxIter,
deltaEM=deltaEM,
deltaNM=deltaNM,
initial_lambda_mu=init_l_mt,
initial_lambda_v=init_l_vt,
normalizeTime=normalizeTime)
fit0coefbs = np.array(fit0.rx2('coefficients'))
fit1coefbs = np.array(fit1.rx2('coefficients'))
spl0mubs = interpolate.CubicSpline(t, fit0coefbs[0], bc_type='natural')
ys0mubs[bcount][m_i] = spl0mubs(iplT)
spl1mubs = interpolate.CubicSpline(t, fit1coefbs[0], bc_type='natural')
ys1mubs[bcount][m_i] = spl1mubs(iplT)
l2bs[bcount][m_i] = np.sqrt(np.trapz(np.square(ys0mubs[bcount][m_i] - ys1mubs[bcount][m_i]), x=iplT))
for g0 in range(len(grp0)):
spl0bs = interpolate.CubicSpline(t, fit0coefbs[g0 + 1] + fit0coefbs[0], bc_type='natural')
ys0vHatbs[bcount][m_i][g0] = spl0bs(iplT)
se0bs[bcount][m_i][g0] = np.trapz(np.square(ys0mubs[bcount][m_i] - ys0vHatbs[bcount][m_i][g0]), x=iplT)
for g1 in range(len(grp1)):
spl1bs = interpolate.CubicSpline(t, fit1coefbs[g1 + 1] + fit1coefbs[0], bc_type='natural')
ys1vHatbs[bcount][m_i][g1] = spl1bs(iplT)
se1bs[bcount][m_i][g1] = np.trapz(np.square(ys1mubs[bcount][m_i] - ys1vHatbs[bcount][m_i][g1]), x=iplT)
sebs[bcount][m_i] = np.sqrt(np.mean(se0bs[bcount][m_i])/len(grp0) + np.mean(se1bs[bcount][m_i])/len(grp1))
sembs = 0.
tvalbs[bcount] = np.divide(l2bs[bcount], sebs[bcount] + sembs)
t3 = time.time()
print(t3 - t2)
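# Bootstrap p-value: fraction of bootstrap test statistics at least as large as the observed statistic.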
for m_i in range(len(sel)):
pval[m_i] = (tvalbs[:,m_i] >= tval[m_i]).sum()/n_bootstrap
pvalCorr = smm.multipletests(pval, alpha=0.05, method='fdr_bh')[1]
print('p-value: ' + str(len(np.where(pval <= 0.05)[0])))
print(np.where(pval <= 0.05)[0])
# Plotting ====================================================================
cmap1 = cm.get_cmap(colorMap, 2)
cmap2 = cm.get_cmap(colorMap, N)
cmap3 = cm.get_cmap(colorMap, len(sel))
cmap_grp0 = cm.get_cmap('viridis', len(grp0))
cmap_grp1 = cm.get_cmap('viridis', len(grp1))
def plotC(idx):
"""
    Plots data points, individual fitted curves, and the mean curve of the control group
:param idx: index of the selection
"""
fdgrp0tme_arr = np.array(fulldata[fulldata.grp == 0]["tme"])
fdgrp0sel_arr = np.array(fulldata[fulldata.grp == 0][sel])
plt.figure(figsize=figSize)
if not plotSMEMeanOnly:
for g0 in range(len(grp0)):
tmeIdx = np.where(grp0_f == grp0[g0])
plt.plot(fdgrp0tme_arr[tmeIdx], fdgrp0sel_arr[:,idx][tmeIdx], color=cmap_grp0(g0), marker='o', linestyle='')
plt.plot(iplT, ys0vHat[idx][g0], color=cmap_grp0(g0), linestyle='dashed')
plt.plot(iplT, ys0mu[idx], lw=3, color=cmap1(0))
plt.show()
def plotT(idx):
"""
    Plots data points, individual fitted curves, and the mean curve of the treatment group
:param idx: index of the selection
"""
fdgrp1tme_arr = np.array(fulldata[fulldata.grp == 1]["tme"])
fdgrp1sel_arr = np.array(fulldata[fulldata.grp == 1][sel])
plt.figure(figsize=figSize)
if not plotSMEMeanOnly:
for g1 in range(len(grp1)):
tmeIdx = np.where(grp1_f == grp1[g1])
plt.plot(fdgrp1tme_arr[tmeIdx], fdgrp1sel_arr[:,idx][tmeIdx], color=cmap_grp1(g1), marker='o', linestyle='')
plt.plot(iplT, ys1vHat[idx][g1], color=cmap_grp1(g1), linestyle='dashed')
plt.plot(iplT, ys1mu[idx], lw=3, color=cmap1(1))
plt.show()
def plotCT(idx):
"""
    Plots data points, individual fitted curves, and the mean curves of both the control and treatment groups
:param idx: index of the selection
"""
fdgrp0tme_arr = np.array(fulldata[fulldata.grp == 0]["tme"])
fdgrp0sel_arr = np.array(fulldata[fulldata.grp == 0][sel])
fdgrp1tme_arr = np.array(fulldata[fulldata.grp == 1]["tme"])
fdgrp1sel_arr = np.array(fulldata[fulldata.grp == 1][sel])
plt.figure(figsize=figSize)
if not plotSMEMeanOnly:
for g0 in range(len(grp0)):
tmeIdx = np.where(grp0_f == grp0[g0])
plt.plot(fdgrp0tme_arr[tmeIdx], fdgrp0sel_arr[:,idx][tmeIdx], color=cmap1(0), marker='o', linestyle='')
plt.plot(iplT, ys0vHat[idx][g0], color=cmap1(0), linestyle='dashed')
for g1 in range(len(grp1)):
tmeIdx = np.where(grp1_f == grp1[g1])
plt.plot(fdgrp1tme_arr[tmeIdx], fdgrp1sel_arr[:,idx][tmeIdx], color=cmap1(1), marker='o', linestyle='')
plt.plot(iplT, ys1vHat[idx][g1], color=cmap1(len(sel)), linestyle='dashed')
plt.plot(iplT, ys0mu[idx], lw=3, color=cmap1(0))
plt.plot(iplT, ys1mu[idx], lw=3, color=cmap1(1))
plt.show()
def plotCTbs(bcount, idx):
"""
    Plots data points, individual fitted curves, and the mean curves of both the control and treatment groups for a bootstrap sample
:param bcount: index of bootstrapping sample
:param idx: index of the selection
"""
fdgrp0tme_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 0]["tme"])
fdgrp0sel_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 0][sel])
fdgrp1tme_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 1]["tme"])
fdgrp1sel_arr = np.array(fulldataS[bcount][fulldataS[bcount].grp == 1][sel])
plt.figure(figsize=figSize)
if not plotSMEMeanOnly:
for g0 in range(len(grp0)):
tmeIdx = np.where(grp0_f == grp0[g0])
plt.plot(fdgrp0tme_arr[tmeIdx], fdgrp0sel_arr[:,idx][tmeIdx], color=cmap1(0), marker='o', linestyle='')
plt.plot(iplT, ys0vHatbs[bcount][idx][g0], color=cmap1(0), linestyle='dashed')
for g1 in range(len(grp1)):
tmeIdx = np.where(grp1_f == grp1[g1])
plt.plot(fdgrp1tme_arr[tmeIdx], fdgrp1sel_arr[:,idx][tmeIdx], color=cmap1(1), marker='o', linestyle='')
plt.plot(iplT, ys1vHatbs[bcount][idx][g1], color=cmap1(len(sel)), linestyle='dashed')
plt.plot(iplT, ys0mubs[bcount][idx], lw=3, color=cmap1(0))
plt.plot(iplT, ys1mubs[bcount][idx], lw=3, color=cmap1(1))
plt.show()
def exportOutput(path=None):
"""
    Export outputs to the specified path (defaults to ./output)
"""
if path:
outputdir = path
else:
outputdir = os.path.join(os.getcwd(), 'output')
if not os.path.exists(outputdir):
os.mkdir(outputdir)
fulldataRaw.to_csv(os.path.join(outputdir, 'fulldataRaw.csv'))
fulldata.to_csv(os.path.join(outputdir, 'fulldata.csv'))
df = pd.DataFrame(ys0mu)
df.to_csv(os.path.join(outputdir, 'ys0mu.csv'))
df = pd.DataFrame(ys1mu)
df.to_csv(os.path.join(outputdir, 'ys1mu.csv'))
if not os.path.exists(os.path.join(outputdir, 'ys0vHat')):
os.mkdir(os.path.join(outputdir, 'ys0vHat'))
if not os.path.exists(os.path.join(outputdir, 'ys1vHat')):
os.mkdir(os.path.join(outputdir, 'ys1vHat'))
for i in range(len(ys0vHat)):
df1 = pd.DataFrame(ys0vHat[i])
df1.to_csv(os.path.join(os.path.join(outputdir, 'ys0vHat'), 'ys0vHat_' + str(i) + '.csv'))
df2 =
|
pd.DataFrame(ys1vHat[i])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/funnel_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iPLM5TwcMcTe"
# In this notebook, we explore the "funnel of hell". This refers to a posterior in which
# the mean and variance of a variable are highly correlated, and have a funnel
# shape. (The term "funnel of hell" is from [this blog post](https://twiecki.io/blog/2014/03/17/bayesian-glms-3/) by <NAME>.)
#
# We illustrate this using a hierarchical Bayesian model for inferring Gaussian means, fit to synthetic data, similar to 8 schools (except we vary the sample size and fix the variance). This code is based on [this notebook](http://bebi103.caltech.edu.s3-website-us-east-1.amazonaws.com/2017/tutorials/aux8_mcmc_tips.html) from <NAME>.
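# + [markdown] id="funnel-sketch-md"
# A minimal standalone sketch (not part of the original notebook) of the reparameterization idea used
# further below: the centered model draws theta ~ N(mu, tau) directly, while the non-centered model
# draws a standard-normal var_theta and sets theta = mu + tau * var_theta, which removes the
# funnel-shaped coupling between theta and tau. Names ending in `_demo` are illustrative only.
# + id="funnel-sketch-code"
import numpy as np  # local import so the sketch is self-contained
rng_demo = np.random.default_rng(0)
mu_demo, tau_demo = 8.0, 3.0                      # same hyperparameter values used for the synthetic data below
var_theta_demo = rng_demo.normal(size=5)          # standard-normal draws
theta_demo = mu_demo + tau_demo * var_theta_demo  # non-centered: theta recovered deterministically
print(theta_demo)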
# + id="-sWa3BStE4ov"
# %matplotlib inline
import sklearn
import scipy.stats as stats
import scipy.optimize
import matplotlib.pyplot as plt
import seaborn as sns
import time
import numpy as np
import os
import pandas as pd
# + id="1UEFiUi-qZA1" colab={"base_uri": "https://localhost:8080/"} outputId="1a20ff5d-68e6-4f60-81e0-1456bfa83b5f"
# !pip install -U pymc3>=3.8
import pymc3 as pm
print(pm.__version__)
import arviz as az
print(az.__version__)
# + id="SS-lUcY9ovUd"
import math
import pickle
import numpy as np
import pandas as pd
import scipy.stats as st
import theano.tensor as tt
import theano
# + id="H4iJ8eTAr3yF" colab={"base_uri": "https://localhost:8080/"} outputId="23291ee5-7822-41fb-d3ca-c829cd0891f5"
np.random.seed(0)
# Specify parameters for random data
mu_val = 8
tau_val = 3
sigma_val = 10
n_groups = 10
# Generate number of replicates for each repeat
n = np.random.randint(low=3, high=10, size=n_groups, dtype=int)
print(n)
print(sum(n))
# + id="oyyDYNGfsmUa" colab={"base_uri": "https://localhost:8080/"} outputId="f8d2cf60-fbbd-4a29-fcd6-747cd2e18870"
# Generate data set
mus = np.zeros(n_groups)
x = np.array([])
for i in range(n_groups):
mus[i] = np.random.normal(mu_val, tau_val)
samples = np.random.normal(mus[i], sigma_val, size=n[i])
x = np.append(x, samples)
print(x.shape)
group_ind = np.concatenate([[i]*n_val for i, n_val in enumerate(n)])
# + id="Vz-gdn-zuCcx" colab={"base_uri": "https://localhost:8080/", "height": 692} outputId="19b32b08-cffc-4800-9667-5ff22df6f387"
with pm.Model() as centered_model:
# Hyperpriors
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=2.5)
log_tau = pm.Deterministic('log_tau', tt.log(tau))
# Prior on theta
theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_groups)
# Likelihood
x_obs = pm.Normal('x_obs',
mu=theta[group_ind],
sd=sigma_val,
observed=x)
np.random.seed(0)
with centered_model:
centered_trace = pm.sample(10000, chains=2)
pm.summary(centered_trace).round(2)
# + id="UMLPIRMPsgej" colab={"base_uri": "https://localhost:8080/", "height": 963} outputId="3227aaef-1030-490f-8605-5744d27f269c"
with pm.Model() as noncentered_model:
# Hyperpriors
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=2.5)
log_tau = pm.Deterministic('log_tau', tt.log(tau))
# Prior on theta
#theta = pm.Normal('theta', mu=mu, sd=tau, shape=n_trials)
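    # Non-centered parameterization: draw a standard-normal var_theta and scale it by tau below, so
    # theta is recovered deterministically and the sampler avoids the funnel-shaped (mu, tau) geometry.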
var_theta = pm.Normal('var_theta', mu=0, sd=1, shape=n_groups)
theta = pm.Deterministic('theta', mu + var_theta * tau)
# Likelihood
x_obs = pm.Normal('x_obs',
mu=theta[group_ind],
sd=sigma_val,
observed=x)
np.random.seed(0)
with noncentered_model:
noncentered_trace = pm.sample(1000, chains=2)
pm.summary(noncentered_trace).round(2)
# + id="XqQQUavXvFWT" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="88b33782-8b68-4057-e1c9-b582e6db8cc1"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['tau'], name='tau')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', xlabel='µ', ylabel='τ');
axs[0].axhline(0.01)
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['tau'], name='tau')
axs[1].plot(x, y, '.');
axs[1].set(title='NonCentered', xlabel='µ', ylabel='τ');
axs[1].axhline(0.01)
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
# + id="--jgSNVBLadC" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="6cf32ae5-ee7b-4abe-bf8f-b51450bb02d1"
x = pd.Series(centered_trace['mu'], name='mu')
y = pd.Series(centered_trace['tau'], name='tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('centered')
plt.show()
# + id="tEfEJ8JuLX43" colab={"base_uri": "https://localhost:8080/", "height": 495} outputId="4869fb30-3d07-4e0c-a6da-03c1014923b3"
x = pd.Series(noncentered_trace['mu'], name='mu')
y = pd.Series(noncentered_trace['tau'], name='tau')
g = sns.jointplot(x, y, xlim=xlim, ylim=ylim)
plt.suptitle('noncentered')
plt.show()
# + id="1-FQqDkTFEqy" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="b9804230-dc6c-4586-9a5a-1ad38a9cab82"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x =
|
pd.Series(centered_trace['mu'], name='mu')
|
pandas.Series
|
'''
1. ########## annotations_metadata.csv ##########
   Columns: file_id, user_id, subforum_id, num_contexts, label
   Pick: file_id and label
2. ########## Read all files ##########
   Go to the all_files directory and collect the text for each file_id.
'''
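# Usage sketch (illustrative only; assumes DATA_DIR is configured as in definitions.py):
#     ws = WhiteSupremacy()
#     labelled_texts = ws.get_white_supremiest_data()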
import os
import collections
from pandas import read_csv as read_csv
from pandas import DataFrame
from pandas import concat, merge
from definitions import DATA_DIR, TRANSFORMED_DATA_DIR
from etl.definations_configurations import ABUSE, NO_ABUSE
class WhiteSupremacy:
def _get_fileId_label(self):
'''
Read the file and get Id and Label of the data
'''
path = os.path.join(DATA_DIR, "10 - hate-speech-dataset-master")
file_data = read_csv(path+r'\annotations_metadata.csv')
file_data = file_data.loc[:, ['file_id', 'label']]
# unique labels --> ['noHate' 'hate' 'idk/skip' 'relation']
file_data = file_data[(file_data.label == 'hate') | (file_data.label == 'noHate')]
# transform the labels as needed
file_data.label = file_data.label.apply(lambda x: ABUSE if x.strip().lower() == 'hate' else NO_ABUSE)
return file_data # ['file_id', 'label']
def get_white_supremiest_data(self):
'''
get the comment text
'''
file_pattern = '*.txt'
file_path = DATA_DIR + r"\10 - hate-speech-dataset-master\all_files"
row = collections.namedtuple('row', ['file_id', 'text'])
texts = []
        id_label_frame = self._get_fileId_label()
# id_label_frame = id_label_frame.head(5)
for file_id in id_label_frame['file_id']:
with open(os.path.join(file_path,file_id+'.txt'), 'r', encoding='utf-8') as file:
# id_label_frame[file_id,'text'] = file.read().splitlines()
texts.append(row(file_id=file_id, text=file.read().splitlines()))
text_df = DataFrame(texts)
# return id_label_frame
text_df =
|
merge(id_label_frame, text_df, how='inner', on='file_id')
|
pandas.merge
|
'''
This script helps with basic data preparation
for the nuMoM2b dataset.
'''
import pandas as pd
import numpy as np
# location of the data in this repository (not saved to Github!)
data_loc = './data/nuMoM2b_Dataset_NICHD Data Challenge.csv'
# Creates dummy variables across multiple columns at once.
# Used here for drug codes, but could also be used for ICD codes
# in health-claims data (column order does not matter).
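# Illustrative sketch (hypothetical column names, pandas >= 1.1; the project's own implementation,
# encode_mult, follows below): pool the categories of several columns, dummy-code them once, and sum
# the indicators back per row so that column order does not matter.
_demo = pd.DataFrame({'drug1': ['A', 'B'], 'drug2': ['B', None]})
_demo_long = _demo.melt(ignore_index=False, value_name='code').dropna()
_demo_dummies = pd.get_dummies(_demo_long['code']).groupby(level=0).sum()  # row 0 -> A:1, B:1; row 1 -> B:1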
def encode_mult(df, cols, encode=None, base="V", cum=False, missing_drop=True):
res_counts = []
if not encode:
tot_encode = pd.unique(df[cols].values.ravel())
if missing_drop:
check =
|
pd.isnull(tot_encode)
|
pandas.isnull
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
    --inFile: Path to the input CSV file containing the time-series data values
    --outFile: Path to the output INI configuration file for the time-series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
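        # NOTE: DataFrame.apply returns a new frame; the coerced result above is not assigned, so the
        # columns keep their original dtypes here.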
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if isinstance(analysisFrame[columnName].dtypes, str):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean using a multiplication (weighted-average) method, since direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication method, since division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulator for the sum of squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64": pandas.StringDtype(),
"BitErrorsHost65": pandas.StringDtype(),
"BitErrorsHost66": pandas.StringDtype(),
"BitErrorsHost67": pandas.StringDtype(),
"BitErrorsHost68": pandas.StringDtype(),
"BitErrorsHost69": pandas.StringDtype(),
"BitErrorsHost70": pandas.StringDtype(),
"BitErrorsHost71": pandas.StringDtype(),
"BitErrorsHost72": pandas.StringDtype(),
"BitErrorsHost73": pandas.StringDtype(),
"BitErrorsHost74": pandas.StringDtype(),
"BitErrorsHost75": pandas.StringDtype(),
"BitErrorsHost76": pandas.StringDtype(),
"BitErrorsHost77": pandas.StringDtype(),
"BitErrorsHost78": pandas.StringDtype(),
"BitErrorsHost79": pandas.StringDtype(),
"BitErrorsHost80": pandas.StringDtype(),
"bitErrBucketArray1":
|
pandas.StringDtype()
|
pandas.StringDtype
|
#!/usr/bin/env python3
#
# Create model outputs with P.1203 software.
#
# Copyright 2018 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from itu_p1203.p1203Pv import P1203Pv
from itu_p1203.p1203Pq import P1203Pq
import pandas as pd
import yaml
import argparse
import json
import numpy as np
from tqdm import tqdm
tqdm.pandas()
DB_IDS = ['TR04', 'TR06', 'VL04', 'VL13']
ROOT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
def parse_mode3_features(pvs_id, features_mode3_path):
pvs_features = pd.read_csv(
os.path.join(
features_mode3_path,
pvs_id + '.csv')
)
return pvs_features
def calc_mode0_O22(row):
pvs_features = (int(row["coding_res"]),
int(row["display_res"]),
float(row["bitrate_kbps_segment_size"]),
int(row["framerate"]))
return P1203Pv.video_model_function_mode0(*pvs_features)
def calc_mode1_O22(row):
pvs_features = (int(row["coding_res"]),
int(row["display_res"]),
float(row["bitrate_kbps_segment_size"]),
int(row["framerate"]),
[],
float(row["iframe_ratio"]))
return P1203Pv.video_model_function_mode1(*pvs_features)
def calc_mode2_O22(row):
# check if fallback is needed
has_bitstream_data = "BS_TwoPercentQP1" in row.keys() and isinstance(row["BS_TwoPercentQP1"], str)
try:
avg_qp = eval(row["BS_TwoPercentQP1"])
except Exception as e:
has_bitstream_data = False
if has_bitstream_data:
frame_types = eval(row["types"])
frames = []
for ftyp, qp_values in zip(frame_types, avg_qp):
frames.append({
'type': ftyp,
'qpValues': [qp_values]
})
pvs_features = (
int(row["coding_res"]),
int(row["display_res"]),
int(row["framerate"]),
frames,
None,
[]
)
return P1203Pv.video_model_function_mode2(*pvs_features)
else:
# tqdm.write("Switching back to Mode 1 for PVS {}, sample index {}".format(row["pvs_id"], row["sample_index"]))
return None
def calc_mode3_O22(row):
frame_types = eval(row["types"])
avg_qp = eval(row["BS_Av_QPBB"])
frames = []
for ftyp, qp_values in zip(frame_types, avg_qp):
frames.append({
'type': ftyp,
'qpValues': [qp_values]
})
pvs_features = (
int(row["coding_res"]),
int(row["display_res"]),
float(row["framerate"]),
frames,
None,
[]
)
return P1203Pv.video_model_function_mode3(*pvs_features)
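# Hedged illustration (not part of the original script): calc_mode3_O22 expects a
# row whose "types" and "BS_Av_QPBB" entries are stringified Python lists, which it
# eval()s into per-frame records. All values below are invented, and treating the
# resolutions as pixel counts is an assumption.
def _example_mode3_row():
    row = pd.Series({
        "coding_res": 1920 * 1080,
        "display_res": 1920 * 1080,
        "framerate": 24.0,
        "types": "['I', 'P', 'P']",
        "BS_Av_QPBB": "[22.0, 25.0, 26.0]",
    })
    return calc_mode3_O22(row)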
def calc_O46(O21, O22, device, stall_vec=[]):
l_buff = []
p_buff = []
if stall_vec:
for l, p in stall_vec:
l_buff.append(l)
p_buff.append(p)
pq_fun = P1203Pq(O21, O22, l_buff, p_buff, device)
return pq_fun.calculate()
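# Hedged illustration (not part of the original script): calc_O46 takes per-second
# O21 (audio) and O22 (video) quality lists plus a stalling vector of
# (length, position) pairs, as unpacked above. The numbers and the "pc" device
# label are assumptions made for this sketch.
def _example_O46():
    O21 = [4.5] * 60
    O22 = [4.0] * 60
    stalls = [(2.0, 10.0)]  # one 2-second stall at media position 10 s
    return calc_O46(O21, O22, device="pc", stall_vec=stalls)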
def main(args):
db_data = pd.DataFrame()
O21_path = os.path.join(ROOT_PATH, 'data', 'O21.csv')
stalling_dir_path = os.path.join(ROOT_PATH, 'data', 'test_configs')
features_mode0_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode0.csv')
features_mode1_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode1.csv')
features_mode2_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode2')
features_mode3_path = os.path.join(ROOT_PATH, 'data', 'features', 'features_mode3')
# read in data
# O21
O21_data = pd.read_csv(O21_path)
# stalling
yaml_per_db = {}
for db_id in DB_IDS:
yaml_per_db[db_id] = yaml.safe_load(
open(os.path.join(stalling_dir_path, db_id + '-config.yaml')))
# read in from hdf-files if they exist, otherwise run pv-calc
if args.create_hdfs:
print('Calculating O22 scores for all modes ...')
# mode0 features
print('Reading mode 0 features ...')
mode0_features = pd.read_csv(features_mode0_path)
# mode1 features
print('Reading mode 1 features ...')
mode1_features = pd.read_csv(features_mode1_path)
# mode2 features
print('Reading mode 2 features (may take a while) ...')
pvss = mode1_features["pvs_id"].unique()
list_of_dataframes_for_mode2 = []
for pvs_id in tqdm(pvss):
pvs_data_all = pd.read_csv(os.path.join(features_mode2_path, pvs_id + '.csv'))
if "BS_TwoPercentQP1" in pvs_data_all.keys():
list_of_dataframes_for_mode2.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "sizes", "quant", "coding_res", "display_res", "BS_TwoPercentQP1"
]].copy()
)
else:
# no bitstream data available
list_of_dataframes_for_mode2.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "sizes", "coding_res", "display_res"
]].copy()
)
mode2_features = pd.concat(list_of_dataframes_for_mode2, ignore_index=True)
# mode3 features
print('Reading mode 3 features (may take a while) ...')
pvss = mode1_features["pvs_id"].unique()
list_of_dataframes_for_mode3 = []
for pvs_id in tqdm(pvss):
pvs_data_all = pd.read_csv(os.path.join(features_mode3_path, pvs_id + '.csv'))
list_of_dataframes_for_mode3.append(
pvs_data_all[[
"pvs_id", "sample_index", "framerate", "types", "quant", "coding_res", "display_res", "BS_Av_QPBB"
]].copy()
)
mode3_features = pd.concat(list_of_dataframes_for_mode3, ignore_index=True)
"""
XeroExtractConnector(): Connection between Xero and Database
"""
import logging
import sqlite3
import time
from os import path
from typing import List
import copy
import pandas as pd
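# Hedged usage note (not from the original project): a typical flow with this
# connector is
#   connector = XeroExtractConnector(xero=<authenticated Xero SDK client>, dbconn=sqlite3.connect('xero.db'))
#   connector.create_tables()
#   contact_ids = connector.extract_contacts()
# where the client object and the database path are placeholders.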
class XeroExtractConnector:
"""
- Extract Data from Xero and load to Database
"""
def __init__(self, xero, dbconn):
self.__dbconn = dbconn
self.__xero = xero
self.__dbconn.row_factory = sqlite3.Row
self.logger = logging.getLogger(self.__class__.__name__)
def create_tables(self):
"""
Creates DB tables
"""
basepath = path.dirname(__file__)
ddlpath = path.join(basepath, 'extract_ddl.sql')
ddlsql = open(ddlpath, 'r').read()
self.__dbconn.executescript(ddlsql)
def extract_contacts(self) -> List[str]:
"""
Extract contacts from Xero
:return: List of contact ids
"""
self.logger.debug('extracting contacts from Xero')
contacts = self.__xero.contacts.all()
if not contacts:
self.logger.info('Extracted 0 contacts')
return []
df_contacts = pd.DataFrame(contacts)
df_contacts = df_contacts[['ContactID', 'Name', 'ContactStatus', 'IsSupplier', 'IsCustomer']]
df_contacts.to_sql('xero_extract_contacts', self.__dbconn, if_exists='append', index=False)
self.logger.info('Extracted %d contacts', len(df_contacts))
return df_contacts['ContactID'].to_list()
def extract_trackingcategories(self) -> List[str]:
"""
Extract tracking options from Xero
:return: List of tracking option ids
"""
self.logger.debug('extracting tracking from Xero')
trackingcategories = self.__xero.trackingcategories.all()
self.logger.debug('trackingcategories = %s', str(trackingcategories))
if not trackingcategories:
self.logger.info('Extracted 0 trackingcategories and 0 trackingoptions')
return []
# tracking categories is a nested structure - so we get two flatted ones and create two tables
tcl = []
tol = []
for tc in trackingcategories:
options = copy.deepcopy(tc['Options'])
tcl.append(tc)
for to in options:
to['TrackingCategoryID'] = tc['TrackingCategoryID']
tol.append(to)
df_tcl = pd.DataFrame(tcl)
import numpy as np
from scipy import ndimage
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, Polygon
import shapefile
import os
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from utility import *
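# Hedged note (not from the original project): the CSV files consumed below are
# assumed to carry real-world 'x'/'y' coordinates; shift_data_coords() shifts them
# by (x_min, y_min) and divides by `resolution` so they index into the binary
# gridmap built in get_gridmap().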
class Visualize:
def __init__(self, resolution, input_filepath, output_filepath):
self.resolution = resolution
self.input_filepath = input_filepath
self.output_filepath = output_filepath
self.gridmap = None
self.x_min = None
self.y_min = None
def set_patrol_posts(self, patrol_post_filename):
patrol_posts = pd.read_csv(patrol_post_filename)
self.patrol_posts = self.shift_data_coords(patrol_posts)
###########################################################
# utility
###########################################################
def mask_to_grid(self, map):
return np.ma.masked_where(self.gridmap==0, map)
# for the gridmap, return list of indices of valid cells
# order: begin from top left, then go row-by-row
def get_indices_from_gridmap(self):
# need this complicated way to compute corresponding indices
# within the gridmap boundary because numpy indexing
# starts at lower left corner, and the CSV file assumes
# ordering starts in top left corner
idx = [[], []]
for y in range(self.gridmap.shape[0] - 1, -1, -1):
add_idx = np.where(self.gridmap[y, :] == 1)
idx[0] += [y] * add_idx[0].shape[0]
idx[1] += list(add_idx[0])
return tuple(idx)
# build a single map (np.array) from a flat array of values
# covering the valid cells of the gridmap
def get_map_from_array(self, array):
idx = self.get_indices_from_gridmap()
map = np.zeros(self.gridmap.shape)
map[idx] = array
return map
# build a single map (np.array) from the single value column of a CSV file
# covering the valid cells of the gridmap
def get_map_from_csv(self, filename):
data = pd.read_csv(filename)
print(' creating map from file {}'.format(filename))
# discard first column: index of grid cell
data.drop(data.columns[0], axis=1, inplace=True)
if data.shape[1] > 1:
raise Exception('ambiguous input: filename {} has more than one value column'.format(filename))
idx = self.get_indices_from_gridmap()
map = np.zeros(self.gridmap.shape)
map[idx] = data.values[:,0]
return map
# maps is a dictionary of {map_name : map}
def save_maps_to_csv(self, filename_out, maps):
idx = self.get_indices_from_gridmap()
map_names = list(maps.keys())
data = {'x_idx': idx[1], 'y_idx': idx[0]}
for i in range(len(maps)):
map_name = map_names[i]
map = maps[map_name]
data[map_name] = map[idx]
data_df = pd.DataFrame(data)
data_df.to_csv(filename_out)
# scale and transform to real crs coordinates
def scale_to_real(self, shape):
assert type(shape) == gpd.GeoDataFrame
shape.geometry = shape.geometry.translate(self.x_min, self.y_min)
shape.geometry = shape.geometry.scale(xfact=self.resolution, yfact=self.resolution, origin=(self.x_min, self.y_min))
return shape
###########################################################
# visualize
###########################################################
# options:
# - log_norm: whether rendering is displayed as log.
# useful for past patrol effort
# - min_value and max_value: bounds on the colorbar scale
# - plot_patrol_post: whether to display patrol posts in images
def save_map(self, feature_map, feature_name, cmap='Greens', log_norm=False, min_value=None, max_value=None, plot_title=True, plot_patrol_post=True):
# mask feature map
feature_map = self.mask_to_grid(feature_map)
if min_value is None:
min_value = feature_map.min()
if max_value is None:
max_value = feature_map.max()
fig, ax = plt.subplots()
if log_norm:
a = plt.imshow(np.flipud(feature_map), interpolation='none', cmap=cmap, extent=[0, self.gridmap.shape[1], 0, self.gridmap.shape[0]], norm=LogNorm(vmin=min_value, vmax=max_value))
else:
a = plt.imshow(np.flipud(feature_map), interpolation='none', cmap=cmap, extent=[0, self.gridmap.shape[1], 0, self.gridmap.shape[0]], vmin=min_value, vmax=max_value)
plt.colorbar(a)
# set plot title and labels
if plot_title:
plt.title(feature_name)
#plt.xticks(np.arange(0,mx+1),[self.min_xval+resolution*i for i in range(mx+1)], rotation=60)
plt.xlabel('x', fontsize=6)
#plt.yticks(np.arange(0,my+1),[self.min_yval+resolution*i for i in range(my+1)])
plt.ylabel('y', fontsize=6)
# plot patrol post locations
if plot_patrol_post and self.patrol_posts is not None:
for index, row in self.patrol_posts.iterrows():
sx = row['x']
sy = row['y']
plt.plot([sx+0.5], [sy+0.5], marker='o', markersize=5, color='aqua', markeredgewidth=1, markeredgecolor='blue')
# set background color
axes = plt.gca()
axes.set_facecolor((0,0,0))
plt.savefig(self.output_filepath + 'plot_{}.png'.format(feature_name))
plt.close()
# title - string
# masked_map - masked np array of map to plot
# shapefiles - dict of (string, GeoDataFrame) files
# crs_out - string that specifies crs of the shapefiles
def save_map_with_features(self, title, masked_map, shapefiles, crs_out, cmap='Reds', vmin=None, vmax=None, log_norm=False):
map_grid = map_to_color_grid(masked_map)
# prepare plot
fig, ax = plt.subplots(figsize=(10,10), dpi=150)
ax.set_facecolor((.9,.9,.9)) # gray background
ax.set_aspect('equal') # displays proportionally
# hide tick labels
ax.tick_params(labelbottom=False)
ax.tick_params(labelleft=False)
# make shapefiles directory
if not os.path.exists(self.output_filepath + 'shapefiles/'):
os.makedirs(self.output_filepath + 'shapefiles/')
# create output shapefile and save
map_grid.crs = crs_out # {'init': crs_out}.
map_grid = self.scale_to_real(map_grid)
if log_norm:
map_grid.plot(ax=ax, column='value', cmap=cmap, legend=True, norm=LogNorm(vmin=vmin, vmax=vmax))
else:
map_grid.plot(ax=ax, column='value', cmap=cmap, legend=True, vmin=vmin, vmax=vmax)
map_grid.to_file('{}shapefiles/map_grid_{}.shp'.format(self.output_filepath, title))
# plot shapefiles
shapefiles['boundary'].plot(ax=ax, facecolor='none', edgecolor='black', linewidth=.5) # facecolor='#e4e8c6'
if 'patrol_posts' in shapefiles:
shapefiles['patrol_posts'].plot(marker='o', markersize=20, color='blue', ax=ax)
if 'roads' in shapefiles:
shapefiles['roads'].plot(ax=ax, facecolor='none', edgecolor='#68200c', linewidth=.5)
if 'water' in shapefiles:
shapefiles['water'].plot(ax=ax, facecolor='#40b4d1', edgecolor='black', linewidth=.5)
if 'rivers' in shapefiles:
shapefiles['rivers'].plot(ax=ax, facecolor='none', edgecolor='#40b4d1', linewidth=.5)
if 'patrol_blocks' in shapefiles:
shapefiles['patrol_blocks'].plot(ax=ax, facecolor='none', edgecolor='black', linewidth=.5)
if 'core_zone' in shapefiles:
shapefiles['core_zone'].plot(ax=ax, facecolor='none', edgecolor='green', linewidth=2)
if 'buffer' in shapefiles:
shapefiles['buffer'].plot(ax=ax, facecolor='none', edgecolor='#666666', linewidth=2)
# save out plot
plt.title('{}'.format(title))
fig.savefig('{}map_{}.png'.format(self.output_filepath, title))
plt.close()
# NOTE: this .npy file must be saved from PatrolProblem.py
# (or from this script)
def get_riskmap_from_npy(self, npy_filename):
riskmap = np.load(npy_filename)
return riskmap
# get list of np.arrays, where each is a map of predicted risk
# at a different threshold of patrol effort
def get_maps_from_csv(self, maps_filename):
num_extra_cols = 4
map_data = pd.read_csv(maps_filename)
map_data = self.shift_data_coords(map_data)
num_maps = len(map_data.columns) - num_extra_cols
maps = []
for i in range(num_maps):
print(' creating map: {}'.format(map_data.columns[num_extra_cols + i]))
maps.append(np.zeros(self.gridmap.shape))
for index, row in map_data.iterrows():
for i in range(num_maps):
maps[i][int(row['y'])][int(row['x'])] = row.iloc[i + num_extra_cols]
for i in range(num_maps):
maps[i] = self.mask_to_grid(maps[i])
return map_data.columns[num_extra_cols:], maps
def shift_data_coords(self, data):
assert self.x_min is not None
assert self.y_min is not None
# compute point by scaling down by resolution
data['x'] = (data['x'] - self.x_min) / self.resolution
data['y'] = (data['y'] - self.y_min) / self.resolution
# convert to int
data['x'] = data['x'].astype(int)
data['y'] = data['y'].astype(int)
return data
# create gridmap, which is a binary mask of cells within the boundary
# 0 => point is not inside boundary
# 1 => point is inside boundary
def get_gridmap(self, static_features_filename):
data = pd.read_csv(static_features_filename)
# compute shifting for each row
self.x_min = int(np.min(data['x']))
self.y_min = int(np.min(data['y']))
data = self.shift_data_coords(data)
# set max values after scaling down by resolution
scaled_x_max = int(np.max(data['x']))
scaled_y_max = int(np.max(data['y']))
# create gridmap
gridmap = [[0 for x in range(scaled_x_max+1)] for y in range(scaled_y_max+1)]
# gridmap = np.zeros((y_max+1, x_max+1))
for index, row in data.iterrows():
gridmap[int(row['y'])][int(row['x'])] = 1
gridmap = np.ma.masked_where(gridmap == 1, gridmap)
self.gridmap = gridmap
return gridmap
# read in and process all features from static features CSV
def load_static_feature_maps(self, static_features_filename):
print('load static features from {}...'.format(static_features_filename))
data = pd.read_csv(static_features_filename)
data = self.shift_data_coords(data)
# create feature maps
static_feature_names = list(data.columns[4:]) + ['Null']
feature_maps = {}
for static_feature_name in static_feature_names:
print(' processing feature: {}'.format(static_feature_name))
if static_feature_name == 'Null':
feature_map = np.zeros(self.gridmap.shape)
for index, row in data.iterrows():
feature_map[int(row['y'])][int(row['x'])] = 0
else:
feature_map = np.zeros(self.gridmap.shape)
for index, row in data.iterrows():
feature_map[int(row['y'])][int(row['x'])] = row[static_feature_name]
feature_maps[static_feature_name] = feature_map
return feature_maps
# get past patrol effort map
def get_past_patrol(self, data_filename):
data = pd.read_csv(data_filename)
# Copyright 2019 <NAME>, Inc. and the University of Edinburgh. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os
import sys
from typing import List
import dill
import numpy as np
import pandas as pd
import sentencepiece as spm
from sumeval.metrics.rouge import RougeCalculator
from sumeval.metrics.bleu import BLEUCalculator
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchtext.vocab import Vocab
from torchtext.data import Field, RawField, TabularDataset, BucketIterator
# from beam_search import Search, BeamSearch
import time
from models import LabelSmoothingLoss, TransformerModel, SumEvaluator, denumericalize
from utils import Config
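# Hedged usage note (not from the original repository): the block below expects four
# config file paths on the command line, in this order (script and file names here
# are placeholders):
#   python generate_outputs.py prepare.yaml train.yaml aggregate.yaml generate.yaml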
if __name__ == "__main__":
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--individual',
action='store_true')
args = parser.parse_args()
print("Option: --individual={}".format(args.individual))
individual = args.individual
"""
individual = False
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if len(sys.argv) < 5:
print("Config file(s) are missing")
print("Usage: {} <prepare_conf> <train_conf> <aggregate_conf> <generate_conf>")
sys.exit(1)
p_conf = Config(sys.argv[1])
t_conf = Config(sys.argv[2])
a_conf = Config(sys.argv[3]) # can be CSV
g_conf = Config(sys.argv[4])
assert p_conf.conf_type == "prepare"
assert t_conf.conf_type == "train"
assert a_conf.conf_type == "aggregate"
assert g_conf.conf_type == "generate"
verbose = 0
# Check if the method is valid
assert g_conf["method"] in ["greedy", "beam"]
# Basepath
if "BASEPATH" not in os.environ:
basepath = "."
else:
basepath = os.environ["BASEPATH"]
# model filepath / output filepath
model_filepath = os.path.join(basepath,
"model",
"{}_op2text_{}.pt".format(p_conf.conf_name,
t_conf.conf_name))
output_filepath = os.path.join(basepath,
"output",
"{}_op2text_{}_{}_{}.csv".format(p_conf.conf_name,
t_conf.conf_name,
a_conf.conf_name,
g_conf.conf_name))
output_dirpath = os.path.dirname(output_filepath)
if not os.path.exists(output_dirpath):
os.makedirs(output_dirpath)
# Load Fields
with open(model_filepath.replace(".pt", "_IN_TEXT.field"), "rb") as fin:
IN_TEXT = dill.load(fin)
with open(model_filepath.replace(".pt", "_OUT_TEXT.field"), "rb") as fin:
OUT_TEXT = dill.load(fin)
with open(model_filepath.replace(".pt", "_ID.field"), "rb") as fin:
ID = dill.load(fin)
# Data file
data_dirpath = os.path.join(basepath,
"data",
"{}".format(p_conf.conf_name))
train_filepath = os.path.join(data_dirpath,
"train.csv")
valid_filepath = os.path.join(data_dirpath,
"dev.csv")
test_filepath = os.path.join(data_dirpath,
"test.csv")
agg_test_filepath = os.path.join(data_dirpath,
"aggregate{}.csv".format(a_conf.get_agg_name()))
assert os.path.exists(train_filepath)
assert os.path.exists(valid_filepath)
assert os.path.exists(test_filepath)
assert os.path.exists(agg_test_filepath)
agg_test_df = pd.read_csv(agg_test_filepath)
###########################################################################################################################
# SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM SIM #
###########################################################################################################################
import os
from datetime import datetime
import numpy as np
import pandas as pd
from transform.extract.download_SIM import download_DOXXaaaa, download_table_dbf, download_table_cnv
"""
Módulo de limpeza/tratamento de dados do SIM.
"""
# Função para converter um "value" num certo "type" de objeto ou caso não seja possível utiliza o valor "default"
def tryconvert(value, default, type):
try:
return type(value)
except (ValueError, TypeError):
return default
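# Hedged illustration (not part of the original module): tryconvert returns the
# fallback default whenever the cast raises ValueError or TypeError.
def _tryconvert_examples():
    assert tryconvert('12', 0, int) == 12    # successful cast
    assert tryconvert('abc', 0, int) == 0    # ValueError -> default
    assert tryconvert(None, -1, int) == -1   # TypeError -> default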
# Class for the main SIM data
class DataSimMain:
# Constructor
def __init__(self, state, year):
self.state = state
self.year = year
# Method to read the main SIM data file as a pandas DataFrame and to adjust and format its...
# columns and values
def get_DOXXaaaa_treated(self):
# Reads the "dbc" file (or "parquet", if already downloaded) as a pandas DataFrame
dataframe = download_DOXXaaaa(self.state, self.year)
print(f'The number of rows in file DO{self.state}{self.year} is {dataframe.shape[0]}.')
for coluna in dataframe.columns.values:
dataframe[coluna] = dataframe[coluna].apply(lambda x: x if '\x00' not in x else '')
# Columns defined as required in the pandas DataFrame that will feed the dobr table
lista_columns = np.array(['NUMERODO', 'CODINST', 'TIPOBITO', 'DTOBITO', 'HORAOBITO', 'NUMSUS',
'NATURAL', 'CODMUNNATU', 'DTNASC', 'IDADE', 'SEXO', 'RACACOR', 'ESTCIV',
'ESC', 'ESC2010', 'OCUP', 'CODMUNRES', 'LOCOCOR', 'CODESTAB', 'CODMUNOCOR',
'TPMORTEOCO', 'ASSISTMED', 'EXAME', 'CIRURGIA', 'NECROPSIA', 'LINHAA',
'LINHAB', 'LINHAC', 'LINHAD', 'LINHAII', 'CAUSABAS', 'CRM', 'DTATESTADO',
'CIRCOBITO', 'ACIDTRAB', 'FONTE', 'TPPOS', 'DTINVESTIG', 'CAUSABAS_O',
'DTCADASTRO', 'ATESTANTE', 'FONTEINV', 'DTRECEBIM', 'ATESTADO', 'ESCMAEAGR1',
'ESCFALAGR1', 'STDOEPIDEM', 'STDONOVA', 'DIFDATA', 'DTCADINV', 'TPOBITOCOR',
'DTCONINV', 'FONTES'])
# Create an empty pandas DataFrame with the columns specified above
df = pd.DataFrame(columns=lista_columns)
# Place the data of "dataframe" into "df" in the columns of the same name, automatically...
# filling with float NaN the columns of "df" that are not present in "dataframe"
for col in df.columns.values:
for coluna in dataframe.columns.values:
if coluna == col:
df[col] = dataframe[coluna].tolist()
break
# Coloca na variável "dif_set" o objeto array dos nomes das colunas da variável "df" que não estão...
# presentes na variável "dataframe"
dif_set = np.setdiff1d(df.columns.values, dataframe.columns.values)
# Substitui o float NaN pela string vazia as colunas da variável "df" não presentes na variável "dataframe"
for col in dif_set:
df[col].replace(np.nan, '', inplace=True)
# Drop the last digit of the identified columns, which corresponds to the check digit of the...
# municipality code
# It was found that for some municipalities the check digit calculation is not valid
# This check digit was present in the DOXXxxxx files up to the year 2005 (to be confirmed!)
if len(df.loc[0, 'CODMUNNATU']) == 7:
df['CODMUNNATU'].replace(regex='.$',value='', inplace=True)
if len(df.loc[0, 'CODMUNRES']) == 7:
df['CODMUNRES'].replace(regex='.$',value='', inplace=True)
if len(df.loc[0, 'CODMUNOCOR']) == 7:
df['CODMUNOCOR'].replace(regex='.$',value='', inplace=True)
# Simplify/correct the presentation of the data in the specified columns
df['HORAOBITO'] = df['HORAOBITO'].apply(lambda x: x[:4] if len(x) > 4 else x)
df['NATURAL'] = df['NATURAL'].apply(lambda x: x.zfill(3))
df['OCUP'] = df['OCUP'].apply(lambda x: x.zfill(6))
df['OCUP'] = df['OCUP'].apply(str.strip)
df['OCUP'] = df['OCUP'].apply(lambda x: x if len(x) == 6 else '')
df['CODESTAB'] = df['CODESTAB'].apply(lambda x: x.zfill(7))
for col in np.array(['ESCMAEAGR1', 'ESCFALAGR1']):
for i in np.array(['00', '01', '02', '03', '04', '05', '06', '07', '08', '09']):
df[col].replace(i, str(int(i)), inplace=True)
# Update/correct the labels of the specified columns
df['NATURAL'].replace(['000', '999'], '', inplace=True)
df['NATURAL'].replace('800', '001', inplace=True)
df['NATURAL'].replace(['00.', '8s9'], '', inplace=True)
for col in np.array(['DTOBITO', 'DTNASC']):
df[col] = df[col].apply(lambda x: x if len(x) == 8 else '')
df[col] = df[col].apply(lambda x: x if ' ' not in x else '')
df[col] = df[col].apply(lambda x: x if '/' not in x else '')
df[col] = df[col].apply(lambda x: x if '¾' not in x else '')
df[col] = df[col].apply(lambda x: x if 'ó' not in x else '')
df[col] = df[col].apply(lambda x: x if 1 <= tryconvert(x[0:2], 0, int) <= 31 else '')
df[col] = df[col].apply(lambda x: x if 1 <= tryconvert(x[2:4], 0, int) <= 12 else '')
for col in np.array(['CODMUNNATU', 'CODMUNRES', 'CODMUNOCOR']):
df[col].replace(['000000', '150475', '421265', '422000', '431454',
'500627', '990002', '990010', '990014', '999999'], '', inplace=True)
df[col].replace([str(i) for i in range(334501, 334531)], '330455', inplace=True)
df[col].replace([str(i) for i in range(358001, 358059)], '355030', inplace=True)
df[col].replace(['530000', '530500', '530600', '530800', '530900', '531700', '539901',
'539902', '539904', '539905', '539906', '539907', '539914', '539916',
'539918', '539919', '539920', '539921', '539924', '539925'], '530010', inplace=True)
df['SEXO'].replace('1', 'M', inplace=True) # Label "M" for male (Masculino)
df['SEXO'].replace('2', 'F', inplace=True) # Label "F" for female (Feminino)
df['SEXO'].replace('0', '3', inplace=True)
df['SEXO'].replace('3', 'IN', inplace=True) # Label "IN" for undefined (INdefinido)
df['ESTCIV'].replace(['²', '±'], '', inplace=True)
df['ESC'].replace('A', '', inplace=True)
df['OCUP'] = df['OCUP'].apply(lambda x: x if ' ' not in x else '')
df['OCUP'] = df['OCUP'].apply(lambda x: x if '.' not in x else '')
df['OCUP'] = df['OCUP'].apply(lambda x: x if '+' not in x else '')
df['OCUP'] = df['OCUP'].apply(lambda x: x if 'X' not in x else '')
df['OCUP'].replace('000000', '', inplace=True)
df['CODESTAB'].replace('0000000', '', inplace=True)
df['CODESTAB'].replace('2306840', '2461234', inplace=True)
df['CODESTAB'].replace('2464276', '2726688', inplace=True)
df['CODESTAB'].replace('2517825', '3563308', inplace=True)
df['CODESTAB'].replace('2772299', '2465140', inplace=True)
df['CODESTAB'].replace('3064115', '3401928', inplace=True)
df['TPMORTEOCO'].replace('8', '6', inplace=True)
df['TPMORTEOCO'].replace('9', '7', inplace=True)
for col in np.array(['ASSISTMED', 'EXAME', 'CIRURGIA', 'NECROPSIA', 'ACIDTRAB']):
df[col].replace(['0', '3', '4', '5', '6', '7', '8', '9'], '', inplace=True)
df[col].replace('2', '0', inplace=True) # "2", meaning "No", is converted to the...
# string "0" of the binary domain
for col in np.array(['CAUSABAS', 'CAUSABAS_O']):
df[col].replace('B501', 'B508', inplace=True)
df[col].replace('B656', 'B653', inplace=True)
df[col].replace('C141', 'C140', inplace=True)
df[col].replace('M723', 'M724', inplace=True)
df[col].replace('M725', 'M728', inplace=True)
df[col].replace('N975', 'N978', inplace=True)
df[col].replace('Q314', 'P288', inplace=True)
df[col].replace('Q350', 'Q351', inplace=True)
df[col].replace('Q352', 'Q353', inplace=True)
df[col].replace('Q354', 'Q355', inplace=True)
df[col].replace(['Q356', 'Q358'], 'Q359', inplace=True)
df[col].replace('R500', 'R508', inplace=True)
df[col].replace('R501', 'R500', inplace=True)
df[col].replace(['X590', 'X591', 'X592', 'X593', 'X594',
'X595', 'X596', 'X597', 'X598'], 'X599', inplace=True)
df[col].replace('Y34', 'Y349', inplace=True)
df[col].replace('Y447', 'Y448', inplace=True)
df['CAUSABAS_O'].replace(regex='.$',value='', inplace=True)
df['TPPOS'].replace('2', '0', inplace=True) # "2", meaning "No", is converted to the...
# string "0" of the binary domain
df['DTATESTADO'].replace('09201608', '', inplace=True)
df['DTATESTADO'] = df['DTATESTADO'].apply(lambda x: x if len(x) == 8 else '')
df['DTATESTADO'] = df['DTATESTADO'].apply(lambda x: x if x[2:4] != '20' else '')
df['CIRCOBITO'].replace(['á', 'ß', 'C'], '', inplace=True)
for col in np.array(['ESCMAEAGR1', 'ESCFALAGR1']):
df[col].replace('9', '', inplace=True)
df['TPOBITOCOR'].replace('0', '', inplace=True)
# Assign a single label to values with the same meaning in the specified columns
df['TIPOBITO'].replace(['0', '3', '4', '5', '6', '7', '8', '9'], '', inplace=True)
for col in np.array(['RACACOR', 'ESTCIV', 'ESC', 'LOCOCOR', 'ATESTANTE']):
df[col].replace(['0', '6', '7', '8', '9'], '', inplace=True)
df['ESC2010'].replace(['6', '7', '8', '9'], '', inplace=True)
df['TPMORTEOCO'].replace(['0', '7', '8', '9'], '', inplace=True)
for col in np.array(['CIRCOBITO', 'FONTE']):
df[col].replace(['0', '5', '6', '7', '8', '9'], '', inplace=True)
df['FONTEINV'].replace(['0', '9'], '', inplace=True)
# Replace the empty string with the string "NA" in the foreign key columns
# The FONTES column is only treated as such because it receives specific handling further below
for col in np.array(['TIPOBITO', 'NATURAL', 'CODMUNNATU', 'RACACOR',
'ESTCIV', 'ESC', 'ESC2010', 'OCUP', 'CODMUNRES',
'LOCOCOR', 'CODESTAB', 'CODMUNOCOR', 'TPMORTEOCO',
'CAUSABAS', 'CIRCOBITO', 'FONTE', 'CAUSABAS_O',
'ATESTANTE', 'FONTEINV', 'ESCMAEAGR1', 'ESCFALAGR1',
'TPOBITOCOR', 'FONTES']):
df[col].replace('', 'NA', inplace=True)
# Replace the empty string with None in the specified attribute columns
for col in np.array(['CODINST', 'HORAOBITO', 'NUMSUS', 'SEXO', 'LINHAA',
'LINHAB', 'LINHAC', 'LINHAD', 'LINHAII', 'CRM', 'ATESTADO']):
df[col].replace('', None, inplace=True)
# Divisão da coluna "FONTES" em seis colunas conforme Dicionário de Dados da Tabela DOM ("M" de...
# investigação materna)
df['FONTES'] = df['FONTES'].apply(lambda x: x if len(x) == 6 else x)
for pos, col in enumerate(['FONTENTREV', 'FONTEAMBUL', 'FONTEPRONT', 'FONTESVO', 'FONTEIML', 'FONTEPROF']):
df[col] = df['FONTES'].apply(lambda x, pos=pos: 'NA' if x == 'NA' else x[pos]) # The value in the "else"...
# branch is this source's character: "S" for "Sim" (Yes) or "X" for No
df[col].replace('X', '0', inplace=True) # Replace the string "X" with "0" for "No", making the...
# column "col" binary-valued
df[col].replace('S', '1', inplace=True) # Replace the string "S" with "1" for "Yes", making the...
# column "col" binary-valued
# Drop the "FONTES" column, which becomes unnecessary after adding the six columns above
df.drop('FONTES', axis=1, inplace=True)
# Convert the specified columns from string to datetime, replacing missing dates ("NaT") with the...
# future date "2099-01-01" to allow inserting these columns into the PostgreSQL DBMS
for col in np.array(['DTOBITO', 'DTNASC', 'DTATESTADO', 'DTINVESTIG',
'DTCADASTRO', 'DTRECEBIM', 'DTCADINV', 'DTCONINV']):
df[col] = df[col].apply(lambda x: datetime.strptime(x, '%d%m%Y').date() \
if x != '' else datetime(2099, 1, 1).date())
# Check whether the dates in the specified columns are implausible and, if so, replace them with the...
# future date "2099-01-01"
for col in np.array(['DTOBITO', 'DTATESTADO', 'DTINVESTIG',
'DTCADASTRO', 'DTRECEBIM', 'DTCADINV', 'DTCONINV']):
df[col] = df[col].apply(lambda x: x if datetime(2000, 12, 31).date() < x < \
datetime(2020, 12, 31).date() else datetime(2099, 1, 1).date())
df['DTNASC'] = df['DTNASC'].apply(lambda x: x if datetime(1850, 12, 31).date() < x < \
datetime(2020, 12, 31).date() else datetime(2099, 1, 1).date())
# Compute the difference between the dates of death and birth in days and store it as the "IDADE"...
# column of the pandas DataFrame
df['IDADE'] = df['DTOBITO'] - df['DTNASC']
# Convert the IDADE column values from datetime.timedelta to string
# On the same line, split each IDADE value into two strings and keep...
# only the first one
df['IDADE'] = df['IDADE'].apply(lambda x: str(x).split(' day')[0])
# Values for which the previous operation produced the string "0:00:00" are replaced with the string...
# "0" (newborns who lived less than one day)
df['IDADE'].replace('0:00:00', '0', inplace=True)
# Convert the IDADE column values from string to float (in days), assigning float NaN to...
# strings that start with "-"
df['IDADE'] = df['IDADE'].apply(lambda x: np.nan if x[0] == '-' else float(x))
# Convert the column from days to years, keeping five decimal places
df['IDADE'] = df['IDADE'].div(365).round(5)
# Convert the binary-valued (0 or 1) attribute columns from object to int or to None
for col in np.array(['ASSISTMED', 'EXAME', 'CIRURGIA', 'NECROPSIA', 'ACIDTRAB',
'TPPOS', 'STDOEPIDEM', 'STDONOVA', 'FONTENTREV', 'FONTEAMBUL',
'FONTEPRONT', 'FONTESVO', 'FONTEIML', 'FONTEPROF']):
df[col] = df[col].apply(lambda x: tryconvert(x, None, int))
# Convert the attribute columns holding quantity-like values from object to float with no decimal...
# places, or to None when the column holds the empty string
df['DIFDATA'] = df['DIFDATA'].apply(lambda x: round(float(x),0) if x != '' else None)
# Sort the pandas DataFrame in ascending order of the DTOBITO column values
df.sort_values(by=['DTOBITO'], inplace=True)
# Renumber the indices after the change made in the previous step
df.reset_index(drop=True, inplace=True)
# Rename columns
df.rename(index=str, columns={'TIPOBITO': 'TIPOBITO_ID', 'NATURAL': 'NATURALE_ID',
'CODMUNNATU': 'CODMUNNATU_ID', 'RACACOR': 'RACACOR_ID',
'ESTCIV': 'ESTCIV_ID', 'ESC': 'ESC_ID',
'ESC2010': 'ESC2010_ID', 'OCUP': 'OCUP_ID',
'CODMUNRES': 'CODMUNRES_ID', 'LOCOCOR': 'LOCOCOR_ID',
'CODESTAB': 'CODESTAB_ID', 'CODMUNOCOR': 'CODMUNOCOR_ID',
'TPMORTEOCO': 'TPMORTEOCO_ID', 'CAUSABAS': 'CAUSABAS_ID',
'CIRCOBITO': 'CIRCOBITO_ID', 'FONTE': 'FONTE_ID',
'CAUSABAS_O': 'CAUSABAS_O_ID', 'ATESTANTE': 'ATESTANTE_ID',
'FONTEINV': 'FONTEINV_ID', 'ESCMAEAGR1': 'ESCMAEAGR1_ID',
'ESCFALAGR1': 'ESCFALAGR1_ID', 'TPOBITOCOR': 'TPOBITOCOR_ID'}, inplace=True)
print(f'Processed file DO{self.state}{self.year} (final shape: {df.shape[0]} x {df.shape[1]}).')
return df
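# Hedged usage sketch (not part of the original module; 'SP' and '2017' are
# placeholder values and the call downloads and processes real SIM files):
#   df_dobr = DataSimMain('SP', '2017').get_DOXXaaaa_treated()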
# Class for the auxiliary SIM data
class DataSimAuxiliary:
# Constructor
def __init__(self, path):
self.path = path
# Function to adjust and format the columns and values of the TCC TIPOBITO (file TIPOBITO.cnv)
def get_TIPOBITO_treated(self):
# Conversão da TCC TIPOBITO para um objeto pandas DataFrame
file_name = 'TIPOBITO'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'TIPO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the TCC NAT1212 (file NAT1212.cnv)
def get_NAT1212_treated(self):
# Conversão da TCC NAT1212 para um objeto pandas DataFrame
file_name = 'NAT1212'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'LOCAL'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df" até formar uma...
# "string" de tamanho = 3
df['ID'] = df['ID'].apply(lambda x: x.zfill(3))
# Upload do arquivo "xlsx" que contém os NATURAL presentes nos arquivos DOXXxxxx (a partir do ano...
# de 2001) e não presentes na TCC NAT1212. Ou seja, isso parece ser uma falha dos dados do Datasus
dataframe = pd.read_excel(self.path + 'NATURAL_OUT_NAT1212_ANOS_1997_2017' + '.xlsx')
# Converte a coluna "ID" do objeto "dataframe" de "int" para "string"
dataframe['ID'] = dataframe['ID'].astype('str')
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "dataframe" até...
# formar uma "string" de tamanho = 3
dataframe['ID'] = dataframe['ID'].apply(lambda x: x.zfill(3))
# Adiciona a coluna "LOCAL" e respectivos valores ao objeto "dataframe"
dataframe['LOCAL'] = ['NAO PROVIDO NA TCC NAT1212'] * (dataframe.shape[0])
# Concatenação do objeto "dataframe" ao objeto "df"
frames = []
frames.append(df)
frames.append(dataframe)
dfinal = pd.concat(frames, ignore_index=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
dfinal.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena eventualmente as linhas por ordem crescente dos valores da coluna ID
dfinal.sort_values(by=['ID'], inplace=True)
# Reset eventualmente o index devido ao sorting prévio e à eventual eliminação de duplicates
dfinal.reset_index(drop=True, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar célula de...
# string vazia da coluna "NATURAL_ID" da tabela DOBR
dfinal.loc[dfinal.shape[0]] = ['NA', 'NOT AVAILABLE']
return dfinal
# Function to adjust and format the columns and values of the TABUF table (file TABUF.dbf)
def get_TABUF_treated(self):
# Conversão da Tabela TABUF para um objeto pandas DataFrame
file_name = 'TABUF'
df = download_table_dbf(file_name)
# Renomeia colunas especificadas
df.rename(index=str, columns={'CODIGO': 'ID', 'DESCRICAO': 'ESTADO'}, inplace=True)
# Reordena as colunas
df = df[['ID', 'ESTADO', 'SIGLA_UF']]
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE', '?']
return df
# Function to adjust and format the columns and values of the RSAUDE table (from IBGE)
def get_RSAUDE_treated(self):
# Conversão da Tabela RSAUDE (em formato "xlsx") para um objeto pandas DataFrame
df = pd.read_excel(self.path + 'RSAUDE' + '.xlsx')
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'REGIAO'}, inplace=True)
# Converte para string a coluna especificada
df['ID'] = df['ID'].astype('str')
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Method to adjust and format the columns and values of the CADMUN table (file CADMUN.dbf)
def get_CADMUN_treated(self):
# Conversão da Tabela CADMUN para um objeto pandas DataFrame
file_name = 'CADMUN'
df1 = download_table_dbf(file_name)
# Renomeia as colunas especificadas
df1.rename(index=str, columns={'MUNCOD': 'ID', 'UFCOD': 'UFCOD_ID'}, inplace=True)
# Drop a linha inteira em que a coluna "ID" tem o valor especificado por não representar nenhum município
df1 = df1.drop(df1[df1['ID']=='000000'].index)
# Remove colunas indesejáveis do objeto pandas DataFrame
df1 = df1.drop(['MUNSINON', 'MUNSINONDV', 'MESOCOD', 'MICROCOD', 'MSAUDCOD',
'RSAUDCOD', 'CSAUDCOD', 'RMETRCOD', 'AGLCOD'], axis=1)
# Substitui uma string vazia pela string "?" nas colunas especificadas
for col in ['SITUACAO', 'MUNSINP', 'MUNSIAFI', 'MUNNOME', 'MUNNOMEX', 'OBSERV',
'AMAZONIA', 'FRONTEIRA', 'CAPITAL', 'ANOINST', 'ANOEXT', 'SUCESSOR']:
df1[col].replace('', '?', inplace=True)
# Substitui uma string vazia pela string "NA" nas colunas especificadas
df1['UFCOD_ID'].replace('', 'NA', inplace=True)
# Substitui uma string vazia pelo float "NaN" nas colunas especificadas
for col in ['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']:
df1[col].replace('', np.nan, inplace=True)
# Converte do tipo object para float as colunas especificadas
df1[['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']] = \
df1[['LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA']].astype('float')
# Coloca todas as string das colunas especificadas como UPPER CASE
df1['MUNNOME'] = df1['MUNNOME'].apply(lambda x: x.upper())
df1['MUNNOMEX'] = df1['MUNNOMEX'].apply(lambda x: x.upper())
# Insere uma linha referente ao Município de Nazária/PI não constante originalmente do arquivo
df1.loc[df1.shape[0]] = ['220672', '2206720', 'ATIVO', '?', '?', 'NAZÁRIA', 'NAZARIA', '?',
'N', 'N', 'N', '22', '?', '?', '?', np.nan, np.nan, np.nan, 363.589]
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna ID
df1.sort_values(by=['ID'], inplace=True)
# Reset o index devido ao sorting prévio e à exclusão e inclusão das linhas referidas acima
df1.reset_index(drop=True, inplace=True)
# Conversão da Tabela rl_municip_regsaud para um objeto pandas DataFrame
file_name = 'rl_municip_regsaud'
df2 = download_table_dbf(file_name)
# Renomeia as colunas especificadas
df2.rename(index=str, columns={'CO_MUNICIP': 'ID', 'CO_REGSAUD': 'RSAUDE_ID'}, inplace=True)
# Faz o merge de "df1" e "df2" pela coluna ID tendo por base "df1"
df = pd.merge(df1, df2, how='left', left_on='ID', right_on='ID')
# Converte o float NaN para a string "NA"
df['RSAUDE_ID'].replace(np.nan, 'NA', inplace=True)
# Reordena as colunas priorizando as "mais" relevantes
df = df[['ID', 'MUNNOME', 'MUNNOMEX', 'MUNCODDV', 'OBSERV', 'SITUACAO', 'MUNSINP',
'MUNSIAFI', 'UFCOD_ID', 'AMAZONIA', 'FRONTEIRA', 'CAPITAL', 'RSAUDE_ID',
'LATITUDE', 'LONGITUDE', 'ALTITUDE', 'AREA', 'ANOINST', 'ANOEXT', 'SUCESSOR']]
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE', '?', '?', '?', '?', '?', '?', 'NA', '?',
'?', '?', 'NA', np.nan, np.nan, np.nan, np.nan, '?', '?', '?']
return df
# Function to adjust and format the columns and values of the TCC RACA (file RACA.cnv)
def get_RACA_treated(self):
# Conversão da TCC RACA para um objeto pandas DataFrame
file_name = 'RACA'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'TIPO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the TCC ESTCIV (file ESTCIV.cnv)
def get_ESTCIV_treated(self):
# Conversão da TCC ESTCIV para um objeto pandas DataFrame
file_name = 'ESTCIV'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'SITUACAO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the TCC INSTRUC (file INSTRUC.cnv)
def get_INSTRUC_treated(self):
# Conversão da TCC INSTRUC para um objeto pandas DataFrame
file_name = 'INSTRUC'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'FAIXA_DE_ANOS_INSTRUCAO'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the TCC ESCSERIE (file ESCSERIE.cnv)
def get_ESCSERIE_treated(self):
# Conversão da TCC ESCSERIE para um objeto pandas DataFrame
file_name = 'ESCSERIE'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'ESCOLARIDADE'}, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the TABOCUP table (file TABOCUP.dbf)
# and of the TCC CBO2002 and OCUPA (files CBO2002.cnv and OCUPA.cnv, respectively)
def get_TABOCUP_2TCC_treated(self):
# Conversão da Tabela TABOCUP para um objeto pandas DataFrame
file_name = 'TABOCUP'
df1 = download_table_dbf(file_name)
# Renomeia as colunas especificadas
df1.rename(index=str, columns={'CODIGO': 'ID', 'DESCRICAO': 'OCUPACAO'}, inplace=True)
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna ID
df1.sort_values(by=['ID'], inplace=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
df1.drop_duplicates(subset='ID', keep='first', inplace=True)
# Reset o index devido ao sorting prévio
df1.reset_index(drop=True, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df1" até formar...
# uma "string" de tamanho = 6
df1['ID'] = df1['ID'].apply(lambda x: x.zfill(6))
# Conversão da TCC CBO2002 para um objeto pandas DataFrame
file_name = 'CBO2002'
df2 = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df2.rename(index=str, columns={'SIGNIFICACAO': 'OCUPACAO'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df2" até formar...
# uma "string" de tamanho = 6
df2['ID'] = df2['ID'].apply(lambda x: x.zfill(6))
# Conversão da TCC OCUPA para um objeto pandas DataFrame
file_name = 'OCUPA'
df3 = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df3.rename(index=str, columns={'SIGNIFICACAO': 'OCUPACAO'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "df3" até formar...
# uma "string" de tamanho = 6
df3['ID'] = df3['ID'].apply(lambda x: x.zfill(6))
# Concatena os três objetos pandas DataFrame
frames = []
frames.append(df1)
frames.append(df2)
frames.append(df3)
df = pd.concat(frames, ignore_index=True)
# Elimina linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
df.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena as linhas por ordem crescente dos valores da coluna "ID"
df.sort_values(by=['ID'], inplace=True)
# Reset o index devido ao sorting prévio e à eventual eliminação de duplicates
df.reset_index(drop=True, inplace=True)
# Upload do arquivo "xlsx" que contém os OCUP presentes nos arquivos DOXXaaaa (dos anos de...
# 1997 a 2017) e não presentes na Tabela TABOCUP e nas TCC CBO2002 e OCUPA. Ou seja, isso...
# parece ser uma falha dos dados do Datasus
dataframe = pd.read_excel(self.path + 'OCUP_OUT_TABOCUP_E_2TCC_ANOS_1997_2017' + '.xlsx')
# Converte a coluna "ID" do objeto "dataframe" de "int" para "string"
dataframe['ID'] = dataframe['ID'].astype('str')
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "ID" do objeto "dataframe" até formar...
# uma "string" de tamanho = 6
dataframe['ID'] = dataframe['ID'].apply(lambda x: x.zfill(6))
# Adiciona a coluna "OCUPACAO" e respectivos valores ao objeto "dataframe"
dataframe['OCUPACAO'] = ['NAO PROVIDO EM TABOCUP.DBF E NAS TCC CBO2002/OCUPA'] * (dataframe.shape[0])
# Concatenação do objeto "dataframe" ao objeto "df"
frames = []
frames.append(df)
frames.append(dataframe)
dfinal = pd.concat(frames, ignore_index=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
dfinal.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena eventualmente as linhas por ordem crescente dos valores da coluna ID
dfinal.sort_values(by=['ID'], inplace=True)
# Reset eventualmente o index devido ao sorting prévio e à eventual eliminação de duplicates
dfinal.reset_index(drop=True, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
dfinal.loc[dfinal.shape[0]] = ['NA', 'NOT AVAILABLE']
return dfinal
# Function to adjust and format the columns and values of the TCC LOCOCOR (file LOCOCOR.cnv)
def get_LOCOCOR_treated(self):
# Conversão da TCC LOCOCOR para um objeto pandas DataFrame
file_name = 'LOCOCOR'
df = download_table_cnv(file_name)
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'LUGAR'}, inplace=True)
# Converte para string a coluna especificada
df['ID'] = df['ID'].astype('str')
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the SIM table CNESDO18 (file CNESDO18.dbf)...
# and of the TCC ESTAB06 (file ESTAB06.cnv)
# It also merges into them the TCC ESFERA and NAT_ORG (files ESFERA.cnv and NAT_ORG.cnv, respectively)
def get_CNESDO18_3TCC_treated(self):
# Conversão da Tabela CNESDO18 para um objeto pandas DataFrame
file_name = 'CNESDO18'
df1 = download_table_dbf(file_name)
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna CODESTAB
df1.sort_values(by=['CODESTAB'], inplace=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna CODESTAB e mantém a primeira ocorrência
df1.drop_duplicates(subset='CODESTAB', keep='first', inplace=True)
# Reset o index devido ao sorting prévio
df1.reset_index(drop=True, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df1" até formar...
# uma "string" de tamanho = 7
df1['CODESTAB'] = df1['CODESTAB'].apply(lambda x: x.zfill(7))
# Conversão da TCC ESTAB06 para um objeto pandas DataFrame
file_name = 'ESTAB06'
df2 = download_table_cnv(file_name)
df2.rename(index=str, columns={'ID': 'CODESTAB', 'SIGNIFICACAO': 'DESCESTAB'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df2" até formar...
# uma "string" de tamanho = 7
df2['CODESTAB'] = df2['CODESTAB'].apply(lambda x: x.zfill(7))
# Concatena os dois objetos pandas DataFrame
frames = []
frames.append(df1)
frames.append(df2)
df = pd.concat(frames, ignore_index=True)
# Elimina linhas duplicadas
df.drop_duplicates(subset='CODESTAB', keep='first', inplace=True)
# Ordena as linhas por ordem crescente dos valores da coluna "CODESTAB"
df.sort_values(by=['CODESTAB'], inplace=True)
# Reseta os índices
df.reset_index(drop=True, inplace=True)
# Conversão da TCC ESFERA18 para um objeto pandas DataFrame
file_name = 'ESFERA18'
df3 = download_table_cnv(file_name)
# Adequa e formata a TCC ESFERA18
df3.rename(index=str, columns={'ID': 'CODESTAB', 'SIGNIFICACAO': 'ESFERA'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df3" até formar...
# uma "string" de tamanho = 7
df3['CODESTAB'] = df3['CODESTAB'].apply(lambda x: x.zfill(7))
# Conversão da TCC NAT_ORG (já em formato "xlsx" e não "cnv") para um objeto pandas DataFrame
file_name = 'NAT_ORG'
df4 = download_table_cnv(file_name)
# Adequa e formata a TCC NAT_ORG
df4.rename(index=str, columns={'ID': 'CODESTAB', 'SIGNIFICACAO': 'REGIME'}, inplace=True)
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "df4" até formar...
# uma "string" de tamanho = 7
df4['CODESTAB'] = df4['CODESTAB'].apply(lambda x: x.zfill(7))
# Realiza o "merge" da TCC ESFERA18 à TCC NAT_ORG
df5 = df3.append(df4, sort=False)
df6 = df5.replace(np.nan,'').groupby('CODESTAB',as_index=False).agg(''.join)
df6.sort_values(by=['CODESTAB'], inplace=True)
df6.reset_index(drop=True, inplace=True)
# Realiza o "merge" da TCC ESFERA18 (+ TCC NAT_ORG) à (Tabela CNESDO18 + TCC ESTAB06)
df7 = df.append(df6, sort=False)
df8 = df7.replace(np.nan,'').groupby('CODESTAB',as_index=False).agg(''.join)
df8.sort_values(by=['CODESTAB'], inplace=True)
df8.reset_index(drop=True, inplace=True)
# Substitui os valores de string vazia das colunas especificadas pela string "?"
df8['DESCESTAB'].replace('','?', inplace=True)
df8['ESFERA'].replace('','?', inplace=True)
df8['REGIME'].replace('','?', inplace=True)
# Upload do arquivo "xlsx" que contém os CODESTAB presentes nos arquivos DOXXaaaa (dos anos de...
# 1997 a 2017) e não presentes na tabela CNESDO18 e nas TCC ESTAB06, ESFERA18 e NAT_ORG. Ou seja,...
# isso parece ser uma falha dos dados do Datasus
dataframe = pd.read_excel(self.path + 'CODESTAB_OUT_CNESDO18_E_3TCC_ANOS_1997_2017' + '.xlsx')
# Converte a coluna "CODESTAB" do objeto "dataframe" de "int" para "string"
dataframe['CODESTAB'] = dataframe['CODESTAB'].astype('str')
# Adiciona zeros à esquerda nos valores (tipo string) da coluna "CODESTAB" do objeto "dataframe" até formar...
# uma "string" de tamanho = 7
dataframe['CODESTAB'] = dataframe['CODESTAB'].apply(lambda x: x.zfill(7))
# Adiciona as colunas "DESCESTAB", "ESFERA" e "REGIME" e respectivos valores ao objeto "dataframe"
dataframe['DESCESTAB'] = ['NAO PROVIDO EM CNESDO18.DBF E NAS TCC ESTAB06/ESFERA18/NAT_ORG'] * (dataframe.shape[0])
dataframe['ESFERA'] = ['?'] * (dataframe.shape[0])
dataframe['REGIME'] = ['?'] * (dataframe.shape[0])
# Concatenação do objeto "dataframe" ao objeto "df8"
frames = []
frames.append(df8)
frames.append(dataframe)
dfinal = pd.concat(frames, ignore_index=True)
# Renomeia a coluna "CODESTAB"
dfinal.rename(index=str, columns={'CODESTAB': 'ID'}, inplace=True)
# Elimina eventuais linhas duplicadas tendo por base a coluna ID e mantém a primeira ocorrência
dfinal.drop_duplicates(subset='ID', keep='first', inplace=True)
# Ordena eventualmente as linhas por ordem crescente dos valores da coluna ID
dfinal.sort_values(by=['ID'], inplace=True)
# Reset eventualmente o index devido ao sorting prévio e à eventual eliminação de duplicates
dfinal.reset_index(drop=True, inplace=True)
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
dfinal.loc[dfinal.shape[0]] = ['NA', 'NOT AVAILABLE', '?', '?']
return dfinal
# Function to adjust and format the columns and values of the TPMORTEOCO table (which appears only...
# in the SIM Data Dictionary)
def get_TPMORTEOCO_treated(self):
# Conversão da Tabela TPMORTEOCO (em formato "xlsx") para um objeto pandas DataFrame
df = pd.read_excel(self.path + 'TPMORTEOCO' + '.xlsx')
# Renomeia a coluna SIGNIFICACAO
df.rename(index=str, columns={'SIGNIFICACAO': 'EPOCA_MORTE'}, inplace=True)
# Converte para string a coluna especificada
df['ID'] = df['ID'].astype('str')
# Inserção da primary key "NA" na tabela de que trata esta função para retratar "missing value"
df.loc[df.shape[0]] = ['NA', 'NOT AVAILABLE']
return df
# Function to adjust and format the columns and values of the CID10 table (file CID10.dbf) and of...
# 21 TCC named CID10_XX ("cnv" files), with XX ranging from 01 to 21, one per ICD-10 chapter.
def get_CID10_treated(self):
# Conversão da Tabela CID10 para um objeto pandas DataFrame
file_name = 'CID10'
df1 = download_table_dbf(file_name)
# Remove colunas indesejáveis do objeto pandas DataFrame
df1 = df1.drop(['OPC', 'CAT', 'SUBCAT', 'RESTRSEXO'], axis=1)
# Renomeia as colunas especificadas
df1.rename(index=str, columns={'CID10': 'ID', 'DESCR': 'DOENCA'}, inplace=True)
# Coloca todas as string da coluna especificada como UPPER CASE
df1['DOENCA'] = df1['DOENCA'].apply(lambda x: x.upper())
# Ordena as linhas de "df1" por ordem crescente dos valores da coluna ID
df1.sort_values(by=['ID'], inplace=True)
# Reset o index devido ao sorting prévio
df1.reset_index(drop=True, inplace=True)
# Conversão das 21 TCC CID10_XX para um objeto pandas DataFrame
frames = []
for i in range(1, 22):
i = str(i).zfill(2)
file_name = 'CID10_' + i
dfi = download_table_cnv(file_name)
frames.append(dfi)
df2 = pd.concat(frames, ignore_index=True)
# Import modules
import abc
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from math import floor
from itertools import chain
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import *
from tensorflow.keras import Sequential
from tensorflow.keras import regularizers
from typeguard import typechecked
from sklearn.cluster import KMeans, SpectralClustering, MiniBatchKMeans
from skimage.feature import hog
from skimage.color import rgb2gray
from scipy.cluster.vq import vq
import matplotlib.pyplot as plt
#######################################################
class alAlgo(metaclass=abc.ABCMeta):
"""
alAlgo() Documentation:
--------------------------
Purpose
----------
Parent class that will be used for making new Active Learning algo classes.
Currently, the class is very sparse. Will make adjustments as the project continues.
Attributes
----------
algo_name : str
used to keep track of name of algo in engine.log
sample_log : dict
tracks what samples are chosen each round, places sample ids in list within dict
round : int
tracks what round algo is on
predict_to_sample : bool
bool that determines whether or not the algo needs the predictions of the model to choose which samples to label
Methods
-------
@classmethod
__subclasshook__(cls, subclass):
Used to check if custom child class of alAlgo is properly made
reset(self):
set round=0 and sample_log={}
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, algo_name="NA"):
self.algo_name = algo_name
self.round = 0
self.sample_log = {}
self.predict_to_sample = False
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, '__call__') and
callable(subclass.__call__) or
NotImplemented)
def reset(self):
self.round = 0
self.sample_log = {}
@abc.abstractmethod
def __call__(self, cache: list, n: int, yh):
""" Selects which samples to get labels for """
raise NotImplementedError
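# Hedged sketch (not part of the original module): a minimal child class showing the
# interface alAlgo expects; it simply takes the first n ids from the cache.
class _TakeFirstExample(alAlgo):
    def __init__(self):
        super().__init__(algo_name="Take First Example")
        self.predict_to_sample = False

    def __call__(self, cache: list, n: int, yh=None) -> list:
        batch = list(cache)[:n]
        self.sample_log[str(self.round)] = batch
        self.round += 1
        return batch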
#######################################################
class marginConfidence(alAlgo):
"""
marginConfidence(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula MC(x)=(1-(P(y1*|x)-P(y2*|x)))
Attributes
----------
predict_to_sample : bool
Determines if the algo needs the model's predictions on the cache to decide which samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Margin Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
        # If the cache is nested per round, select this round's unlabeled cache
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
            except IndexError:
                raise ValueError("Active Learning Algo has iterated through each round's unlabeled cache.")
        # Check if the requested sample size is too large for the cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate MC(x) values
yh_vals = yh.iloc[:, 1:].values
MC_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
sample[::-1].sort()
y1, y2 = sample[0], sample[1]
mc_val = 1 - (y1 - y2)
MC_vals.append(mc_val)
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["MC", "ID"] + target_col_names
yh = pd.concat([pd.DataFrame(MC_vals), yh], axis=1)
yh.columns = yh_col_names
# Get ids of n largest LC vals
n_largest = yh.nlargest(n, 'MC')
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
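# Editor's note: illustrative sketch, not part of the original module. The margin-confidence
# score MC(x) = 1 - (P(y1*|x) - P(y2*|x)) from the docstring, computed vectorised on a
# hypothetical batch of softmax outputs.
import numpy as np
_probs = np.array([[0.50, 0.45, 0.05],   # ambiguous prediction -> high MC
                   [0.90, 0.08, 0.02]])  # confident prediction -> low MC
_top2 = np.sort(_probs, axis=1)[:, -2:]  # two largest probabilities per row
_mc = 1.0 - (_top2[:, 1] - _top2[:, 0])  # array([0.95, 0.18])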
#######################################################
class leastConfidence(alAlgo):
"""
leastConfidence(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula LC(x)=(1-P(y*|x))*(n/(n-1))
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Least Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
        # If the cache is nested per round, select this round's unlabeled cache
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
            except IndexError:
                raise ValueError("Active Learning Algo has iterated through each round's unlabeled cache.")
        # Check if the requested sample size is too large for the cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate LC(x) values
yh_vals = yh.iloc[:, 1:].values
LC_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
lc = (1 - np.amax(sample)) * (yh_vals.shape[1] / (yh_vals.shape[1] - 1))
LC_vals.append((lc))
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["LC", "ID"] + target_col_names
yh = pd.concat([pd.DataFrame(LC_vals), yh], axis=1)
yh.columns = yh_col_names
# Get ids of n largest LC vals
n_largest = yh.nlargest(n, 'LC')
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
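# Editor's note: illustrative sketch, not part of the original module. The least-confidence
# score LC(x) = (1 - P(y*|x)) * n/(n-1) from the docstring, where n is the number of classes,
# computed on the same kind of hypothetical softmax batch.
import numpy as np
_probs = np.array([[0.50, 0.45, 0.05],
                   [0.90, 0.08, 0.02]])
_n_classes = _probs.shape[1]
_lc = (1.0 - _probs.max(axis=1)) * (_n_classes / (_n_classes - 1))  # array([0.75, 0.15])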
#######################################################
class uniformSample(alAlgo):
"""
uniformSample(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
    Randomly samples data ids from the passed cache under a uniform distribution.
Use as a baseline to compare the performance of your active learning algorithms.
Attributes
----------
predict_to_sample : bool
        Determines whether the algo needs the model's predictions on the cache to decide which samples to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Passive")
self.predict_to_sample = False
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh=None) -> list:
        # If the cache is nested per round, select this round's unlabeled cache
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
            except IndexError:
                raise ValueError("Active Learning Algo has iterated through each round's unlabeled cache.")
        # Check if the requested sample size is too large for the cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Select from uniform distributions data ID's from given cache
idx = random.sample(range(0, len(cache)), n)
batch = [cache[i] for i in idx]
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("Selected samples: ")
print(idx)
print("\n")
# Increment round
self.round += 1
return batch
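# Editor's note: illustrative sketch, not part of the original module. The passive baseline above
# reduces to a uniform draw of ids from the cache; seeding makes the baseline reproducible.
import random
_cache_ids = list(range(100))
random.seed(0)
_baseline_batch = random.sample(_cache_ids, 10)  # 10 ids drawn uniformly without replacement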
#######################################################
class ratioConfidence(alAlgo):
"""
ratioConfidence(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula theta(x)=P(y_1/x)/P(y_2/x)
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Ratio Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
        # If the cache is nested per round, select this round's unlabeled cache
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
            except IndexError:
                raise ValueError("Active Learning Algo has iterated through each round's unlabeled cache.")
        # Check if the requested sample size is too large for the cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate RC(x) values
yh_vals = yh.iloc[:, 1:].values
RC_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
sample[::-1].sort()
y1, y2 = sample[0], sample[1]
if y2 == 0:
RC_vals.append(100)
else:
RC_vals.append(y1 / y2)
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["RC", "ID"] + target_col_names
yh = pd.concat([
|
pd.DataFrame(RC_vals)
|
pandas.DataFrame
|
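# Editor's note: illustrative sketch, not part of the record above. The ratio-confidence score
# RC(x) = P(y1*|x) / P(y2*|x) from the docstring, with the same divide-by-zero guard (capped at
# 100) that the class applies, on a hypothetical softmax batch.
import numpy as np
_probs = np.array([[0.50, 0.45, 0.05],
                   [0.90, 0.08, 0.02]])
_top2 = np.sort(_probs, axis=1)[:, -2:]
_rc = np.where(_top2[:, 0] == 0, 100.0, _top2[:, 1] / _top2[:, 0])  # array([ 1.11..., 11.25])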
import os
import shutil
import tempfile
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from keras_scope.test import test
from utils.utils import ensure_dir, save_dataset
from keras_scope.train import train
def cross_validate_with_ground_truth_lesion():
"""
Rationale: check if adding ground truth lesion adds information for final outcome prediction
"""
n_repeats = 1
n_folds = 5
label_file_path = '/mnt/data/hendrik/jk/scope_data/joined_anon_outcomes_2015_2016_2017_2018_df.xlsx'
imaging_dataset_path = '/mnt/data/hendrik/jk/scope_data/data_set_with_combined_mRS_0-2_90_days.npz'
output_dir = '/home/hendrik/jk/output/keras_scope/with_gt_lesion_cross_validation'
# imaging_dataset_path = "/Users/jk1/stroke_datasets/dataset_files/perfusion_data_sets/data_set_with_combined_mRS_0-2_90_days.npz"
# label_file_path = "/Users/jk1/temp/scope_test/joined_anon_outcomes_2015_2016_2017_2018_df.xlsx"
# output_dir = '/Users/jk1/temp/cv_scope_test'
channels = [0, 1, 2, 3, 4]
outcome = "combined_mRS_0-2_90_days"
desired_shape = (46, 46, 46)
epochs = 400
initial_learning_rate = 0.0001
ensure_dir(output_dir)
output_dir = os.path.join(output_dir, 'cv_' + datetime.now().strftime("%Y%m%d_%H%M%S"))
ensure_dir(output_dir)
# load data
params = np.load(imaging_dataset_path, allow_pickle=True)['params']
ids = np.load(imaging_dataset_path, allow_pickle=True)['ids']
outcomes_df =
|
pd.read_excel(label_file_path)
|
pandas.read_excel
|
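# Editor's note: illustrative sketch, not part of the record above. It shows how the n_folds=5
# stratified split imported above could be drawn over subject ids and binary outcomes; the toy
# ids/labels and the random_state are assumptions for illustration only.
import numpy as np
from sklearn.model_selection import StratifiedKFold
_ids = np.arange(20)
_labels = np.array([0, 1] * 10)
_skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for _train_idx, _test_idx in _skf.split(_ids, _labels):
    pass  # each fold keeps the 0/1 outcome ratio; train on _ids[_train_idx], test on _ids[_test_idx]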
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
)
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".format(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
isinstance(self.fill_value, type(other.fill_value)) or
isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from pandas.core.dtypes.missing import isna
return isna(self.fill_value)
@property
def _is_numeric(self):
from pandas.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from pandas.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.format(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".format(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.format(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()['subtype']
has_fill_value = m.groupdict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".format(dtype))
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype):
dtype = getattr(dtype, 'dtype', dtype)
if (isinstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == 'Sparse'
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
            A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
fill_value = astype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if isinstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
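# Editor's note: illustrative sketch, not part of the original pandas source. Default fill values
# follow the table in the class docstring, and update_dtype() converts the fill value along with
# the subtype (np is the module-level numpy import above).
_int_dtype = SparseDtype(np.int64)                               # fill_value defaults to 0
_float_dtype = SparseDtype(np.float64)                           # fill_value defaults to nan
_converted = SparseDtype(np.int64, 0).update_dtype(np.float64)   # Sparse[float64, 0.0]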
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _get_fill(arr):
# type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
# type: (SparseArray, SparseArray, Callable, str) -> Any
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Callable
The binary operation to perform
    name : str
Name of the callable.
Returns
-------
SparseArray
"""
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
subtype = find_common_type([ltype, rtype])
ltype = SparseDtype(subtype, left.fill_value)
rtype = SparseDtype(subtype, right.fill_value)
# TODO(GH-23092): pass copy=False. Need to fix astype_nansafe
left = left.astype(ltype)
right = right.astype(rtype)
dtype = ltype.subtype
else:
dtype = ltype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(
left_sp_values, left.sp_index, left.fill_value,
right_sp_values, right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype)
class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
        The fill value is potentially specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_length` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped together, with large
          regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
determines the dtype of ``self.sp_values``. For SparseDtype,
this determines ``self.sp_values`` and ``self.fill_value``.
copy : bool, default False
Whether to explicitly copy the incoming `data` array.
"""
__array_priority__ = 15
_pandas_ftype = 'sparse'
_subtyp = 'sparse_array' # register ABCSparseArray
def __init__(self, data, sparse_index=None, index=None, fill_value=None,
kind='integer', dtype=None, copy=False):
from pandas.core.internals import SingleBlockManager
if isinstance(data, SingleBlockManager):
data = data.internal_values()
if fill_value is None and isinstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if isinstance(data, (type(self), ABCSparseSeries)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
        # Handle user-provided dtype
if isinstance(dtype, compat.string_types):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = pandas_dtype(dtype)
if isinstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
if index is not None and not is_scalar(data):
raise Exception("must only pass scalars with an index ")
if is_scalar(data):
if index is not None:
if data is None:
data = np.nan
if index is not None:
npoints = len(index)
elif sparse_index is None:
npoints = 1
else:
npoints = sparse_index.length
dtype = infer_dtype_from_scalar(data)[0]
data = construct_1d_arraylike_from_scalar(
data, npoints, dtype
)
if dtype is not None:
dtype = pandas_dtype(dtype)
# TODO: disentangle the fill_value dtype inference from
# dtype inference
if data is None:
# XXX: What should the empty dtype be? Object or float?
data = np.array([], dtype=dtype)
if not is_array_like(data):
try:
# probably shared code in sanitize_series
from pandas.core.internals.construction import sanitize_array
data = sanitize_array(data, index=None)
except ValueError:
# NumPy may raise a ValueError on data like [1, []]
# we retry with object dtype here.
if dtype is None:
dtype = object
data = np.atleast_1d(np.asarray(data, dtype=dtype))
else:
raise
if copy:
# TODO: avoid double copy when dtype forces cast.
data = data.copy()
if fill_value is None:
fill_value_dtype = data.dtype if dtype is None else dtype
if fill_value_dtype is None:
fill_value = np.nan
else:
fill_value = na_value_for_dtype(fill_value_dtype)
if isinstance(data, type(self)) and sparse_index is None:
sparse_index = data._sparse_index
sparse_values = np.asarray(data.sp_values, dtype=dtype)
elif sparse_index is None:
sparse_values, sparse_index, fill_value = make_sparse(
data, kind=kind, fill_value=fill_value, dtype=dtype
)
else:
sparse_values = np.asarray(data, dtype=dtype)
if len(sparse_values) != sparse_index.npoints:
raise AssertionError("Non array-like type {type} must "
"have the same length as the index"
.format(type=type(sparse_values)))
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
@classmethod
def _simple_new(cls, sparse_array, sparse_index, dtype):
# type: (np.ndarray, SparseIndex, SparseDtype) -> 'SparseArray'
new = cls([])
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
def __array__(self, dtype=None, copy=True):
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
# Compat for na dtype and int values.
return self.sp_values
if dtype is None:
# Can NumPy represent this type?
# If not, `np.result_type` will raise. We catch that
# and return object.
if is_datetime64_any_dtype(self.sp_values.dtype):
# However, we *do* special-case the common case of
# a datetime64 with pandas NaT.
if fill_value is NaT:
# Can't put pd.NaT in a datetime64[ns]
fill_value = np.datetime64('NaT')
try:
dtype = np.result_type(self.sp_values.dtype, type(fill_value))
except TypeError:
dtype = object
out = np.full(self.shape, fill_value, dtype=dtype)
out[self.sp_index.to_int_index().indices] = self.sp_values
return out
def __setitem__(self, key, value):
# I suppose we could allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item assignment via setitem"
raise TypeError(msg)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
@property
def sp_index(self):
"""
The SparseIndex containing the location of non- ``fill_value`` points.
"""
return self._sparse_index
@property
def sp_values(self):
"""
An ndarray containing the non- ``fill_value`` values.
Examples
--------
>>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
>>> s.sp_values
array([1, 2])
"""
return self._sparse_values
@property
def dtype(self):
return self._dtype
@property
def fill_value(self):
"""
Elements in `data` that are `fill_value` are not stored.
For memory savings, this should be the most common value in the array.
"""
return self.dtype.fill_value
@fill_value.setter
def fill_value(self, value):
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self):
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if isinstance(self.sp_index, IntIndex):
return 'integer'
else:
return 'block'
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
def __len__(self):
return self.sp_index.length
@property
def _null_fill_value(self):
return self._dtype._is_na_fill_value
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
return isna(fill_value)
else:
return self.fill_value == fill_value
@property
def nbytes(self):
return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self):
"""
The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
@property
def npoints(self):
"""
The number of non- ``fill_value`` points.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.npoints
3
"""
return self.sp_index.npoints
@property
def values(self):
"""
Dense values
"""
return self.to_dense()
def isna(self):
from pandas import isna
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
return type(self)._simple_new(isna(self.sp_values),
self.sp_index, dtype)
def fillna(self, value=None, method=None, limit=None):
"""
Fill missing values with `value`.
Parameters
----------
value : scalar, optional
method : str, optional
.. warning::
Using 'method' will result in high memory use,
as all `fill_value` methods will be converted to
an in-memory ndarray
limit : int, optional
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
"""
if ((method is None and value is None) or
(method is not None and value is not None)):
raise ValueError("Must specify one of 'method' or 'value'.")
elif method is not None:
msg = "fillna with 'method' requires high memory usage."
warnings.warn(msg, PerformanceWarning)
filled = interpolate_2d(np.asarray(self), method=method,
limit=limit)
return type(self)(filled, fill_value=self.fill_value)
else:
new_values = np.where(isna(self.sp_values), value, self.sp_values)
if self._null_fill_value:
# This is essentially just updating the dtype.
new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
else:
new_dtype = self.dtype
return self._simple_new(new_values, self._sparse_index, new_dtype)
def shift(self, periods=1, fill_value=None):
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
subtype = np.result_type(fill_value, self.dtype.subtype)
if subtype != self.dtype.subtype:
# just coerce up front
arr = self.astype(SparseDtype(subtype, self.fill_value))
else:
arr = self
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=arr.dtype
)
if periods > 0:
a = empty
b = arr[:-periods]
else:
a = arr[abs(periods):]
b = empty
return arr._concat_same_type([a, b])
def _first_fill_value_loc(self):
"""
Get the location of the first missing value.
Returns
-------
int
"""
if len(self) == 0 or self.sp_index.npoints == len(self):
return -1
indices = self.sp_index.to_int_index().indices
if not len(indices) or indices[0] > 0:
return 0
diff = indices[1:] - indices[:-1]
return np.searchsorted(diff, 2) + 1
def unique(self):
uniques = list(algos.unique(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
uniques.insert(fill_loc, self.fill_value)
return type(self)._from_sequence(uniques, dtype=self.dtype)
def _values_for_factorize(self):
# Still override this for hash_pandas_object
return np.asarray(self), self.fill_value
def factorize(self, na_sentinel=-1):
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of labels, why bother
# implementing an efficient factorize?
labels, uniques = algos.factorize(np.asarray(self),
na_sentinel=na_sentinel)
uniques = SparseArray(uniques, dtype=self.dtype)
return labels, uniques
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
from pandas import Index, Series
keys, counts = algos._value_counts_arraylike(self.sp_values,
dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and dropna:
pass
else:
if self._null_fill_value:
mask = isna(keys)
else:
mask = keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, ABCIndexClass):
keys = Index(keys)
result = Series(counts, index=keys)
return result
# --------
# Indexing
# --------
def __getitem__(self, key):
if isinstance(key, tuple):
if len(key) > 1:
raise IndexError("too many indices for array.")
key = key[0]
if is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
data_slice = self.values[key]
elif isinstance(key, slice):
# special case to preserve dtypes
if key == slice(None):
return self.copy()
# TODO: this logic is surely elsewhere
# TODO: this could be more efficient
indices = np.arange(len(self), dtype=np.int32)[key]
return self.take(indices)
else:
# TODO: I think we can avoid densifying when masking a
# boolean SparseArray with another. Need to look at the
# key's fill_value for True / False, and then do an intersection
            # on the indices of the sp_values.
if isinstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if com.is_bool_indexer(key) and len(self) == len(key):
return self.take(np.arange(len(key), dtype=np.int32)[key])
elif hasattr(key, '__len__'):
return self.take(key)
else:
raise ValueError("Cannot slice with '{}'".format(key))
return type(self)(data_slice, kind=self.kind)
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.get_value_at(self.sp_values, sp_loc)
def take(self, indices, allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError("'indices' must be an array, not a "
"scalar '{}'.".format(indices))
indices = np.asarray(indices, dtype=np.int32)
if indices.size == 0:
result = []
kwargs = {'dtype': self.dtype}
elif allow_fill:
result = self._take_with_fill(indices, fill_value=fill_value)
kwargs = {}
else:
result = self._take_without_fill(indices)
kwargs = {'dtype': self.dtype}
return type(self)(result, fill_value=self.fill_value, kind=self.kind,
**kwargs)
def _take_with_fill(self, indices, fill_value=None):
if fill_value is None:
fill_value = self.dtype.na_value
if indices.min() < -1:
raise ValueError("Invalid value in 'indices'. Must be between -1 "
"and the length of the array.")
if indices.max() >= len(self):
raise IndexError("out of bounds value in 'indices'.")
if len(self) == 0:
# Empty... Allow taking only if all empty
if (indices == -1).all():
dtype = np.result_type(self.sp_values, type(fill_value))
taken = np.empty_like(indices, dtype=dtype)
taken.fill(fill_value)
return taken
else:
raise IndexError('cannot do a non-empty take from an empty '
'axes.')
sp_indexer = self.sp_index.lookup_array(indices)
if self.sp_index.npoints == 0:
# Avoid taking from the empty self.sp_values
taken = np.full(sp_indexer.shape, fill_value=fill_value,
dtype=np.result_type(type(fill_value)))
else:
taken = self.sp_values.take(sp_indexer)
# sp_indexer may be -1 for two reasons
# 1.) we took for an index of -1 (new)
# 2.) we took a value that was self.fill_value (old)
new_fill_indices = indices == -1
old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
# Fill in two steps.
# Old fill values
# New fill values
# potentially coercing to a new dtype at each stage.
m0 = sp_indexer[old_fill_indices] < 0
m1 = sp_indexer[new_fill_indices] < 0
result_type = taken.dtype
if m0.any():
result_type = np.result_type(result_type,
type(self.fill_value))
taken = taken.astype(result_type)
taken[old_fill_indices] = self.fill_value
if m1.any():
result_type = np.result_type(result_type, type(fill_value))
taken = taken.astype(result_type)
taken[new_fill_indices] = fill_value
return taken
def _take_without_fill(self, indices):
to_shift = indices < 0
indices = indices.copy()
n = len(self)
if (indices.max() >= n) or (indices.min() < -n):
if n == 0:
raise IndexError("cannot do a non-empty take from an "
"empty axes.")
else:
raise IndexError("out of bounds value in 'indices'.")
if to_shift.any():
indices[to_shift] += n
if self.sp_index.npoints == 0:
# edge case in take...
# I think just return
out = np.full(indices.shape, self.fill_value,
dtype=np.result_type(type(self.fill_value)))
arr, sp_index, fill_value = make_sparse(out,
fill_value=self.fill_value)
return type(self)(arr, sparse_index=sp_index,
fill_value=fill_value)
sp_indexer = self.sp_index.lookup_array(indices)
taken = self.sp_values.take(sp_indexer)
fillable = (sp_indexer < 0)
if fillable.any():
# TODO: may need to coerce array to fill value
result_type = np.result_type(taken, type(self.fill_value))
taken = taken.astype(result_type)
taken[fillable] = self.fill_value
return taken
def searchsorted(self, v, side="left", sorter=None):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(
v, side, sorter
)
def copy(self, deep=False):
if deep:
values = self.sp_values.copy()
else:
values = self.sp_values
return self._simple_new(values, self.sp_index, self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
fill_values = [x.fill_value for x in to_concat]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the all-NA case too.
if not (len(set(fill_values)) == 1 or isna(fill_values).all()):
warnings.warn("Concatenating sparse arrays with multiple fill "
"values: '{}'. Picking the first and "
"converting the rest.".format(fill_values),
PerformanceWarning,
stacklevel=6)
keep = to_concat[0]
to_concat2 = [keep]
for arr in to_concat[1:]:
to_concat2.append(cls(np.asarray(arr), fill_value=fill_value))
to_concat = to_concat2
values = []
length = 0
if to_concat:
sp_kind = to_concat[0].kind
else:
sp_kind = 'integer'
if sp_kind == 'integer':
indices = []
for arr in to_concat:
idx = arr.sp_index.to_int_index().indices.copy()
idx += length # TODO: wraparound
length += arr.sp_index.length
values.append(arr.sp_values)
indices.append(idx)
data = np.concatenate(values)
indices = np.concatenate(indices)
sp_index = IntIndex(length, indices)
else:
            # when concatenating block indices, we don't claim that you'll
            # get an identical index as concatenating the values and then
            # creating a new index. We don't want to spend the time trying
            # to merge blocks across arrays in `to_concat`, so the resulting
            # BlockIndex may have more blocks.
blengths = []
blocs = []
for arr in to_concat:
idx = arr.sp_index.to_block_index()
values.append(arr.sp_values)
blocs.append(idx.blocs.copy() + length)
blengths.append(idx.blengths)
length += arr.sp_index.length
data = np.concatenate(values)
blocs = np.concatenate(blocs)
blengths = np.concatenate(blengths)
sp_index = BlockIndex(length, blocs, blengths)
return cls(data, sparse_index=sp_index, fill_value=fill_value)
def astype(self, dtype=None, copy=True):
"""
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
copy : bool, default True
Whether to ensure a copy is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.astype(np.dtype('int32'))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.astype(np.dtype('float64'))
... # doctest: +NORMALIZE_WHITESPACE
[0, 0, 1.0, 2.0]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
        Use a SparseDtype if you wish to change the fill value as well.
>>> arr.astype(SparseDtype("float64", fill_value=np.nan))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
"""
dtype = self.dtype.update_dtype(dtype)
subtype = dtype._subtype_with_str
sp_values = astype_nansafe(self.sp_values,
subtype,
copy=copy)
if sp_values is self.sp_values and copy:
sp_values = sp_values.copy()
return self._simple_new(sp_values,
self.sp_index,
dtype)
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.SparseArray([0, 1, 2])
        >>> arr.map(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.map({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in apply.
# We get hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if isinstance(mapper, ABCSeries):
mapper = mapper.to_dict()
if isinstance(mapper, compat.Mapping):
fill_value = mapper.get(self.fill_value, self.fill_value)
sp_values = [mapper.get(x, None) for x in self.sp_values]
else:
fill_value = mapper(self.fill_value)
sp_values = [mapper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value)
def to_dense(self):
"""
Convert SparseArray to a NumPy array.
Returns
-------
arr : NumPy array
"""
return np.asarray(self, dtype=self.sp_values.dtype)
# TODO: Look into deprecating this in favor of `to_dense`.
get_values = to_dense
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, tuple):
# Compat for pandas < 0.24.0
nd_state, (fill_value, sp_index) = state
sparse_values = np.array([])
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
self._sparse_index = sp_index
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
else:
self.__dict__.update(state)
def nonzero(self):
if self.fill_value == 0:
return self.sp_index.to_int_index().indices,
else:
return self.sp_index.to_int_index().indices[self.sp_values != 0],
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
def _reduce(self, name, skipna=True, **kwargs):
method = getattr(self, name, None)
if method is None:
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
if skipna:
arr = self
else:
arr = self.dropna()
# we don't support these kwargs.
# They should only be present when called via pandas, so do it here.
# instead of in `any` / `all` (which will raise if they're present,
# thanks to nv.validate
kwargs.pop('filter_type', None)
kwargs.pop('numeric_only', None)
kwargs.pop('op', None)
return getattr(arr, name)(**kwargs)
def all(self, axis=None, *args, **kwargs):
"""
Tests whether all elements evaluate True
Returns
-------
all : bool
See Also
--------
numpy.all
"""
nv.validate_all(args, kwargs)
values = self.sp_values
if len(values) != len(self) and not np.all(self.fill_value):
return False
return values.all()
def any(self, axis=0, *args, **kwargs):
"""
        Tests whether at least one element evaluates True
Returns
-------
any : bool
See Also
--------
numpy.any
"""
nv.validate_any(args, kwargs)
values = self.sp_values
if len(values) != len(self) and np.any(self.fill_value):
return True
return values.any().item()
def sum(self, axis=0, *args, **kwargs):
"""
Sum of non-NA/null values
Returns
-------
sum : float
"""
nv.validate_sum(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
if self._null_fill_value:
return sp_sum
else:
nsparse = self.sp_index.ngaps
return sp_sum + self.fill_value * nsparse
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
        When performing the cumulative summation, any NA/null values will
be skipped. The resulting SparseArray will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : int or None
Axis over which to perform the cumulative summation. If None,
perform cumulative summation over flattened array.
Returns
-------
cumsum : SparseArray
"""
nv.validate_cumsum(args, kwargs)
if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".format(axis=axis))
if not self._null_fill_value:
return SparseArray(self.to_dense()).cumsum()
return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index,
fill_value=self.fill_value)
def mean(self, axis=0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
|
nv.validate_mean(args, kwargs)
|
pandas.compat.numpy.function.validate_mean
|
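# Editor's note: illustrative sketch, not part of the record above. The SparseArray implemented
# there is reachable through the public pandas API; fill values are inferred per the class
# docstring (0 for ints, nan for floats).
import numpy as np
import pandas as pd
_sparse_ints = pd.arrays.SparseArray([0, 0, 1, 2])          # fill_value inferred as 0
_dense = np.asarray(_sparse_ints)                           # densify: array([0, 0, 1, 2])
_sparse_floats = pd.arrays.SparseArray([1.0, np.nan, 2.0])  # fill_value defaults to nan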
def main_func(user_screen_name, tweets_count, tweets_to_print):
from apiclient import discovery
from httplib2 import Http
import oauth2client
from oauth2client import file, client, tools
import io
from googleapiclient.http import MediaIoBaseDownload
import tweepy
import csv
import pandas as pd
from bs4 import BeautifulSoup
from nltk.tokenize import WordPunctTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from gensim.models import Word2Vec
import multiprocessing
from nltk.corpus import stopwords
import re
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import numpy as np
import gensim
import os
import warnings
import nltk
from IPython.display import display, HTML
from nltk.tag import StanfordNERTagger
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
import Features, EntitiesOptions, KeywordsOptions, CategoriesOptions, SentimentOptions
warnings.filterwarnings(action='ignore')
|
pd.set_option('display.max_columns', 7)
|
pandas.set_option
|
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
'''Get the 6 data file names'''
direc = r'E:\5研一上\大数据系统b\清华大学-人口预测\北京六环内人流情况统计9-10\北京六环内人流情况统计\统计结果\preprocess_data'
filecsv_list = []  # data file names
for _,_,files in os.walk(direc):
for file in files:
if os.path.splitext(file)[1]=='.csv':
filecsv_list.append(file)
'''Read the required columns of data'''
data = pd.DataFrame()  # data
center = 15*54+30  # grid cell number
col_indexs = [center-55,center-54,center-53,center,center-1,center+1,\
center+53,center+54,center+55]
for csv in filecsv_list:
cur_dir = os.path.join(direc,csv)
data0 =
|
pd.read_csv(cur_dir,header=0,index_col=0,engine='python')
|
pandas.read_csv
|
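# Editor's note: illustrative sketch, not part of the record above. The nine column offsets pick
# the 3x3 neighbourhood around grid cell `center` on a grid laid out 54 cells per row, so +/-1
# steps horizontally and +/-54 steps vertically (the width of 54 is read off the offsets used
# above and is otherwise an assumption).
_width = 54
_center = 15 * _width + 30
_neighbourhood = sorted(_center + dr * _width + dc for dr in (-1, 0, 1) for dc in (-1, 0, 1))
# _neighbourhood == [center-55, center-54, center-53, center-1, center, center+1, center+53, center+54, center+55]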
USAGE="""
Create baseyear controls for MTC Bay Area populationsim.
This script does the following:
1) Downloads the relevant Census tables to a local cache specified by CensusFetcher.LOCAL_CACHE_FOLDER,
one table per file in CSV format. These files are the raw tables at a census geography appropriate
for the control geographies in this script, although the column headers have additional variables
that are more descriptive of what the columns mean.
To re-download the data using the Census API, remove the cache file.
2) It then combines the columns in the Census tables to match the control definitions in the
CONTROLS structure in the script.
3) Finally, it transforms the control tables from the Census geographies to the desired control
geography using the MAZ_TAZ_DEF_FILE, which defines MAZs and TAZs as unions of Census blocks.
For controls derived from census data which is available at smaller geographies, this is a
simple aggregation.
However, for controls derived from census data which is not available at smaller geographies,
      it is assumed that the smaller geography's totals (e.g. households) are apportioned similarly
      to its census geography, and the controls are tallied that way.
4) Creates a simple file, output_[model_year]/maz_data_hh_pop.csv with 3 columns:
      MAZ,hh,tot_pop for use in the maz_data.csv that will be consistent with these controls, where
these "hh" include the 1-person group quarters households and the tot_pop includes both household
and group quarter persons.
5) It joins the MAZs and TAZs to the 2000 PUMAs (used in the 2007-2011 PUMS, which is
used by create_seed_population.py) and saves these crosswalks as well.
Outputs: households /data/[model_year]_[maz,taz,county]_controls.csv
households /data/geo_cross_walk.csv
group_quarters/data/[model_year]_maz_controls.csv
group_quarters/data/geo_cross_walk.csv
output_[model_year]/maz_data_hh_pop.csv
create_baseyear_controls_[model_year].log
"""
import argparse, collections, logging, os, sys
import census, us
import numpy, pandas, simpledbf
MAZ_TAZ_DEF_FILE = "M:\\Data\\GIS layers\\TM2_maz_taz_v2.2\\blocks_mazs_tazs.csv"
MAZ_TAZ_PUMA_FILE = "M:\\Data\\GIS layers\\TM2_maz_taz_v2.2\\mazs_TM2_v2_2_intersect_puma2000.dbf" # NOTE these are PUMA 2000
AGE_MAX = 130 # max person age
NKID_MAX = 10 # max number of kids
NPER_MAX = 10 # max number of persons
NWOR_MAX = 10 # max number of workers
HINC_MAX = 2000000
# COUNTY coding - census to our county code
COUNTY_RECODE = pandas.DataFrame([{"GEOID_county":"06001", "COUNTY":4, "county_name":"Alameda" , "REGION":1},
{"GEOID_county":"06013", "COUNTY":5, "county_name":"Contra Costa" , "REGION":1},
{"GEOID_county":"06041", "COUNTY":9, "county_name":"Marin" , "REGION":1},
{"GEOID_county":"06055", "COUNTY":7, "county_name":"Napa" , "REGION":1},
{"GEOID_county":"06075", "COUNTY":1, "county_name":"San Francisco", "REGION":1},
{"GEOID_county":"06081", "COUNTY":2, "county_name":"San Mateo" , "REGION":1},
{"GEOID_county":"06085", "COUNTY":3, "county_name":"Santa Clara" , "REGION":1},
{"GEOID_county":"06095", "COUNTY":6, "county_name":"Solano" , "REGION":1},
{"GEOID_county":"06097", "COUNTY":8, "county_name":"Sonoma" , "REGION":1}])
class CensusFetcher:
"""
Class to fetch the census data needed for these controls and cache them.
Uses the census python package (https://pypi.org/project/census/)
"""
# Location of the Census API key
API_KEY_FILE = "M:\\Data\\Census\\API\\api-key.txt"
# Store cache of census tables here
LOCAL_CACHE_FOLDER = "M:\\Data\\Census\\CachedTablesForPopulationSimControls"
CA_STATE_FIPS = "06"
BAY_AREA_COUNTY_FIPS = collections.OrderedDict([
("Alameda" ,"001"),
("Contra Costa" ,"013"),
("Marin" ,"041"),
("Napa" ,"055"),
("San Francisco","075"),
("San Mateo" ,"081"),
("Santa Clara" ,"085"),
("Solano" ,"095"),
("Sonoma" ,"097"),
])
# https://api.census.gov/data/2011/acs/acs5/variables.html
# https://api.census.gov/data/2012/acs5/variables.html
# https://api.census.gov/data/2010/sf1/variables.html
# https://api.census.gov/data/2015/acs5/variables.html
# https://api.census.gov/data/2015/acs1/variables.html
CENSUS_DEFINITIONS = {
"H13":[ # sf1, H13. Household Size [8]
# Universe: Occupied housing units
["variable","pers_min", "pers_max"],
["H0130001", 1, NPER_MAX], # Occupied housing units
["H0130002", 1, 1], # 1-person household
["H0130003", 2, 2], # 2-person household
["H0130004", 3, 3], # 3-person household
["H0130005", 4, 4], # 4-person household
["H0130006", 5, 5], # 5-person household
["H0130007", 6, 6], # 6-person household
["H0130008", 7, NPER_MAX], # 7-or-more-person household
],
"P16":[ # sf1, P16. POPULATION IN HOUSEHOLDS BY AGE
# Universe: Population in households
["variable", "age_min", "age_max"],
["P0160001", 0, AGE_MAX], # Population in households
["P0160002", 0, 17], # Under 18 years
["P0160003", 18, AGE_MAX], # 18 years and over
],
"P12":[ # sf1, P12. Sex By Age [49]
# Universe: Total population
["variable", "sex", "age_min", "age_max"],
["P0120001", "All", 0, AGE_MAX], # Total population
["P0120002", "Male", 0, AGE_MAX], # Male:
["P0120003", "Male", 0, 4], # Male: Under 5 years
["P0120004", "Male", 5, 9], # Male: 5 to 9 years
["P0120005", "Male", 10, 14], # Male: 10 to 14 years
["P0120006", "Male", 15, 17], # Male: 15 to 17 years
["P0120007", "Male", 18, 19], # Male: 18 and 19 years
["P0120008", "Male", 20, 20], # Male: 20 years
["P0120009", "Male", 21, 21], # Male: 21 years
["P0120010", "Male", 22, 24], # Male: 22 to 24 years
["P0120011", "Male", 25, 29], # Male: 25 to 29 years
["P0120012", "Male", 30, 34], # Male: 30 to 34 years
["P0120013", "Male", 35, 39], # Male: 35 to 39 years
["P0120014", "Male", 40, 44], # Male: 40 to 44 years
["P0120015", "Male", 45, 49], # Male: 45 to 49 years
["P0120016", "Male", 50, 54], # Male: 50 to 54 years
["P0120017", "Male", 55, 59], # Male: 55 to 59 years
["P0120018", "Male", 60, 61], # Male: 60 and 61 years
["P0120019", "Male", 62, 64], # Male: 62 to 64 years
["P0120020", "Male", 65, 66], # Male: 65 and 66 years
["P0120021", "Male", 67, 69], # Male: 67 to 69 years
["P0120022", "Male", 70, 74], # Male: 70 to 74 years",
["P0120023", "Male", 75, 79], # Male: 75 to 79 years",
["P0120024", "Male", 80, 84], # Male: 80 to 84 years",
["P0120025", "Male", 85, AGE_MAX], # Male: 85 years and over",
["P0120026", "Female", 0, AGE_MAX], # Female:
["P0120027", "Female", 0, 4], # Female: Under 5 years
["P0120028", "Female", 5, 9], # Female: 5 to 9 years
["P0120029", "Female", 10, 14], # Female: 10 to 14 years
["P0120030", "Female", 15, 17], # Female: 15 to 17 years
["P0120031", "Female", 18, 19], # Female: 18 and 19 years
["P0120032", "Female", 20, 20], # Female: 20 years
["P0120033", "Female", 21, 21], # Female: 21 years
["P0120034", "Female", 22, 24], # Female: 22 to 24 years
["P0120035", "Female", 25, 29], # Female: 25 to 29 years
["P0120036", "Female", 30, 34], # Female: 30 to 34 years
["P0120037", "Female", 35, 39], # Female: 35 to 39 years
["P0120038", "Female", 40, 44], # Female: 40 to 44 years
["P0120039", "Female", 45, 49], # Female: 45 to 49 years
["P0120040", "Female", 50, 54], # Female: 50 to 54 years
["P0120041", "Female", 55, 59], # Female: 55 to 59 years
["P0120042", "Female", 60, 61], # Female: 60 and 61 years
["P0120043", "Female", 62, 64], # Female: 62 to 64 years
["P0120044", "Female", 65, 66], # Female: 65 and 66 years
["P0120045", "Female", 67, 69], # Female: 67 to 69 years
["P0120046", "Female", 70, 74], # Female: 70 to 74 years",
["P0120047", "Female", 75, 79], # Female: 75 to 79 years",
["P0120048", "Female", 80, 84], # Female: 80 to 84 years",
["P0120049", "Female", 85, AGE_MAX], # Female: 85 years and over",
],
"B01001":[ # acs5, B01001. SEX BY AGE
# Universe: Total population
["variable", "sex", "age_min", "age_max"],
["B01001_001E", "All", 0, AGE_MAX], # Total population
["B01001_002E", "Male", 0, AGE_MAX], # Male
["B01001_003E", "Male", 0, 4], # Male Under 5 years
["B01001_004E", "Male", 5, 9], # Male 5 to 9 years
["B01001_005E", "Male", 10, 14], # Male 10 to 14 years
["B01001_006E", "Male", 15, 17], # Male 15 to 17 years
["B01001_007E", "Male", 18, 19], # Male 18 and 19 years
["B01001_008E", "Male", 20, 20], # Male 20 years
["B01001_009E", "Male", 21, 21], # Male 21 years
["B01001_010E", "Male", 22, 24], # Male 22 to 24 years
["B01001_011E", "Male", 25, 29], # Male 25 to 29 years
["B01001_012E", "Male", 30, 34], # Male 30 to 34 years
["B01001_013E", "Male", 35, 39], # Male 35 to 39 years
["B01001_014E", "Male", 40, 44], # Male 40 to 44 years
["B01001_015E", "Male", 45, 49], # Male 45 to 49 years
["B01001_016E", "Male", 50, 54], # Male 50 to 54 years
["B01001_017E", "Male", 55, 59], # Male 55 to 59 years
["B01001_018E", "Male", 60, 61], # Male 60 and 61 years
["B01001_019E", "Male", 62, 64], # Male 62 to 64 years
["B01001_020E", "Male", 65, 66], # Male 65 and 66 years
["B01001_021E", "Male", 67, 69], # Male 67 to 69 years
["B01001_022E", "Male", 70, 74], # Male 70 to 74 years
["B01001_023E", "Male", 75, 79], # Male 75 to 79 years
["B01001_024E", "Male", 80, 84], # Male 80 to 84 years
["B01001_025E", "Male", 85, AGE_MAX], # Male 85 years and over
["B01001_026E", "Female", 0, AGE_MAX], # Female
["B01001_027E", "Female", 0, 4], # Female Under 5 years
["B01001_028E", "Female", 5, 9], # Female 5 to 9 years
["B01001_029E", "Female", 10, 14], # Female 10 to 14 years
["B01001_030E", "Female", 15, 17], # Female 15 to 17 years
["B01001_031E", "Female", 18, 19], # Female 18 and 19 years
["B01001_032E", "Female", 20, 20], # Female 20 years
["B01001_033E", "Female", 21, 21], # Female 21 years
["B01001_034E", "Female", 22, 24], # Female 22 to 24 years
["B01001_035E", "Female", 25, 29], # Female 25 to 29 years
["B01001_036E", "Female", 30, 34], # Female 30 to 34 years
["B01001_037E", "Female", 35, 39], # Female 35 to 39 years
["B01001_038E", "Female", 40, 44], # Female 40 to 44 years
["B01001_039E", "Female", 45, 49], # Female 45 to 49 years
["B01001_040E", "Female", 50, 54], # Female 50 to 54 years
["B01001_041E", "Female", 55, 59], # Female 55 to 59 years
["B01001_042E", "Female", 60, 61], # Female 60 and 61 years
["B01001_043E", "Female", 62, 64], # Female 62 to 64 years
["B01001_044E", "Female", 65, 66], # Female 65 and 66 years
["B01001_045E", "Female", 67, 69], # Female 67 to 69 years
["B01001_046E", "Female", 70, 74], # Female 70 to 74 years
["B01001_047E", "Female", 75, 79], # Female 75 to 79 years
["B01001_048E", "Female", 80, 84], # Female 80 to 84 years
["B01001_049E", "Female", 85, AGE_MAX], # Female 85 years and over
],
"B11002":[ # acs5, B11002. HOUSEHOLD TYPE BY RELATIVES AND NONRELATIVES FOR POPULATION IN HOUSEHOLDS
# Universe: Population in households
["variable" ],
["B11002_001E"], # Estimate: Total
],
"B11005":[ # B11005. acs5, HOUSEHOLDS BY PRESENCE OF PEOPLE UNDER 18 YEARS BY HOUSEHOLD TYPE
# Universe: Households
["variable", "family", "famtype", "num_kids_min", "num_kids_max"],
["B11005_002E","All", "All", 1, NKID_MAX], # Households with one or more people under 18 years
["B11005_011E","All", "All", 0, 0], # Households with no people under 18 years
],
"P43":[ # sf1, P43. GROUP QUARTERS POPULATION BY SEX BY AGE BY GROUP QUARTERS TYPE [63]
# Universe: Population in group quarters
["variable", "sex", "age_min", "age_max", "inst","subcategory" ],
["P0430001", "All", 0, 130, "All", "All" ],
["P0430002", "Male", 0, 130, "All", "All" ],
["P0430003", "Male", 0, 17, "All", "All" ],
["P0430004", "Male", 0, 17, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430005", "Male", 0, 17, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430006", "Male", 0, 17, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430007", "Male", 0, 17, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430008", "Male", 0, 17, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430009", "Male", 0, 17, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430010", "Male", 0, 17, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (501)
["P0430011", "Male", 0, 17, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430012", "Male", 0, 17, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430013", "Male", 18, 64, "All", "All" ],
["P0430014", "Male", 18, 64, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430015", "Male", 18, 64, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430016", "Male", 18, 64, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430017", "Male", 18, 64, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430018", "Male", 18, 64, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430019", "Male", 18, 64, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430020", "Male", 18, 64, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430021", "Male", 18, 64, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430022", "Male", 18, 64, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430023", "Male", 65, 130, "All", "All" ],
["P0430024", "Male", 65, 130, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430025", "Male", 65, 130, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430026", "Male", 65, 130, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430027", "Male", 65, 130, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430028", "Male", 65, 130, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430029", "Male", 65, 130, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430030", "Male", 65, 130, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430031", "Male", 65, 130, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430032", "Male", 65, 130, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430033", "Male", 0, 130, "All", "All" ],
["P0430034", "Female", 0, 17, "All", "All" ],
["P0430035", "Female", 0, 17, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430036", "Female", 0, 17, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430037", "Female", 0, 17, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430038", "Female", 0, 17, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430039", "Female", 0, 17, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430040", "Female", 0, 17, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430041", "Female", 0, 17, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (501)
["P0430042", "Female", 0, 17, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430043", "Female", 0, 17, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430044", "Female", 18, 64, "All", "All" ],
["P0430045", "Female", 18, 64, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430046", "Female", 18, 64, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430047", "Female", 18, 64, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430048", "Female", 18, 64, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430049", "Female", 18, 64, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430050", "Female", 18, 64, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430051", "Female", 18, 64, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430052", "Female", 18, 64, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430053", "Female", 18, 64, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
["P0430054", "Female", 65, 130, "All", "All" ],
["P0430055", "Female", 65, 130, "Inst", "All" ], # Institutionalized population (101-106, 201-203, 301, 401-405):
["P0430056", "Female", 65, 130, "Inst", "Correctional"], # Institutionalized population (101-106, 201-203, 301, 401-405): - Correctional facilities for adults (101-106)
["P0430057", "Female", 65, 130, "Inst", "Juvenile" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Juvenile facilities (201-203)
["P0430058", "Female", 65, 130, "Inst", "Nursing" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Nursing facilities/Skilled-nursing facilities (301)
["P0430059", "Female", 65, 130, "Inst", "Other" ], # Institutionalized population (101-106, 201-203, 301, 401-405): - Other institutional facilities (401-405)
["P0430060", "Female", 65, 130, "Noninst", "All" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904):
["P0430061", "Female", 65, 130, "Noninst", "College" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - College/University student housing (5
["P0430062", "Female", 65, 130, "Noninst", "Military" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Military quarters (601-602)
["P0430063", "Female", 65, 130, "Noninst", "Other" ], # Noninstitutionalized population (501, 601-602, 701-702, 704, 706, 801-802, 900-901, 903-904): - Other noninstitutional facilities (701-702, 704, 706, 801-802, 900-901, 903-904)
],
"B23025":[ # acs5, B23025. EMPLOYMENT STATUS FOR THE POPULATION 16 YEARS AND OVER
# Universe: Population 16 years and over
["variable", "inlaborforce", "type", "employed" ],
["B23025_001E", "All", "All", "All" ], # Total
["B23025_002E", "Yes", "All", "All" ], # In labor force
["B23025_003E", "Yes", "Civilian", "All" ], # In labor force, Civilian labor force
["B23025_004E", "Yes", "Civilian", "Employed" ], # In labor force, Civilian labor force, Employed
["B23025_005E", "Yes", "Civilian", "Unemployed"], # In labor force, Civilian labor force, Unemployed
["B23025_006E", "Yes", "Armed Forces", "Employed" ], # In labor force, Armed Forces
["B23025_007E", "No", "All", "All" ], # Not in labor force
],
"B26001":[ # acs5, B26001. GROUP QUARTERS POPULATION
# Universe: Population in group quarters
["variable" ],
["B26001_001E"], # Estimate: Total
],
"PCT16":[ # sf1, PCT16. HOUSEHOLD TYPE BY NUMBER OF PEOPLE UNDER 18 YEARS (EXCLUDING HOUSEHOLDERS, SPOUSES, AND UNMARRIED PARTNERS) [26]
# Universe: Households
["variable", "family", "famtype", "num_kids_min", "num_kids_max"],
["PCT0160001", "All", "All", 0, NKID_MAX], # Total
["PCT0160002", "Family", "All", 0, NKID_MAX], # Family households:
["PCT0160003", "Family", "HusWif", 0, NKID_MAX], # Family households: - Husband-wife family:
["PCT0160004", "Family", "HusWif", 0, 0], # Family households: - Husband-wife family: - With no children under 18 years
["PCT0160005", "Family", "HusWif", 1, 1], # Family households: - Husband-wife family: - With one child under 18 years
["PCT0160006", "Family", "HusWif", 2, 2], # Family households: - Husband-wife family: - With two children under 18 years
["PCT0160007", "Family", "HusWif", 3, 3], # Family households: - Husband-wife family: - With three children under 18 years
["PCT0160008", "Family", "HusWif", 4, NKID_MAX], # Family households: - Husband-wife family: - With four or more children under 18 years
["PCT0160009", "Family", "MaleH", 0, NKID_MAX], # Family households: - Male householder, no wife present:
["PCT0160010", "Family", "MaleH", 0, 0], # Family households: - Male householder, no wife present: - With no children under 18 years
["PCT0160011", "Family", "MaleH", 1, 1], # Family households: - Male householder, no wife present: - With one child under 18 years
["PCT0160012", "Family", "MaleH", 2, 2], # Family households: - Male householder, no wife present: - With two children under 18 years
["PCT0160013", "Family", "MaleH", 3, 3], # Family households: - Male householder, no wife present: - With three children under 18 years
["PCT0160014", "Family", "MaleH", 4, NKID_MAX], # Family households: - Male householder, no wife present: - With four or more children under 18 years
["PCT0160015", "Family", "FemaleH", 0, NKID_MAX], # Family households: - Female householder, no husband present:
["PCT0160016", "Family", "FemaleH", 0, 0], # Family households: - Female householder, no husband present: - With no children under 18 years
["PCT0160017", "Family", "FemaleH", 1, 1], # Family households: - Female householder, no husband present: - With one child under 18 years
["PCT0160018", "Family", "FemaleH", 2, 2], # Family households: - Female householder, no husband present: - With two children under 18 years
["PCT0160019", "Family", "FemaleH", 3, 3], # Family households: - Female householder, no husband present: - With three children under 18 years
["PCT0160020", "Family", "FemaleH", 4, NKID_MAX], # Family households: - Female householder, no husband present: - With four or more children under 18 years
["PCT0160021", "Nonfamily","All", 0, NKID_MAX], # Nonfamily households:
["PCT0160022", "Nonfamily","All", 0, 0], # Nonfamily households: - With no children under 18 years
["PCT0160023", "Nonfamily","All", 1, 1], # Nonfamily households: - With one child under 18 years
["PCT0160024", "Nonfamily","All", 2, 2], # Nonfamily households: - With two children under 18 years
["PCT0160025", "Nonfamily","All", 3, 3], # Nonfamily households: - With three children under 18 years
["PCT0160026", "Nonfamily","All", 4, NKID_MAX], # Nonfamily households: - With four or more children under 18 years
],
"B08202":[ # acs5, B08202. HOUSEHOLD SIZE BY NUMBER OF WORKERS IN HOUSEHOLD
# Universe: Households
["variable", "workers_min","workers_max","persons_min","persons_max"],
["B08202_001E", 0, NWOR_MAX, 0, NPER_MAX], # Total:
["B08202_002E", 0, 0, 0, NPER_MAX], # Total: - No workers
["B08202_003E", 1, 1, 0, NPER_MAX], # Total: - 1 worker
["B08202_004E", 2, 2, 0, NPER_MAX], # Total: - 2 workers
["B08202_005E", 3, NWOR_MAX, 0, NPER_MAX], # Total: - 3 or more workers
["B08202_006E", 0, NWOR_MAX, 1, 1], # Total: - 1-person household:
["B08202_007E", 0, 0, 1, 1], # Total: - 1-person household: - No workers
["B08202_008E", 1, 1, 1, 1], # Total: - 1-person household: - 1 worker
["B08202_009E", 0, NWOR_MAX, 2, 2], # Total: - 2-person household:
["B08202_010E", 0, 0, 2, 2], # Total: - 2-person household: - No workers
["B08202_011E", 1, 1, 2, 2], # Total: - 2-person household: - 1 worker
["B08202_012E", 2, 2, 2, 2], # Total: - 2-person household: - 2 workers
["B08202_013E", 0, NWOR_MAX, 3, 3], # Total: - 3-person household:
["B08202_014E", 0, 0, 3, 3], # Total: - 3-person household: - No workers
["B08202_015E", 1, 1, 3, 3], # Total: - 3-person household: - 1 worker
["B08202_016E", 2, 2, 3, 3], # Total: - 3-person household: - 2 workers
["B08202_017E", 3, 3, 3, 3], # Total: - 3-person household: - 3 workers
["B08202_018E", 0, NWOR_MAX, 4, NPER_MAX], # Total: - 4-or-more-person household:
["B08202_019E", 0, 0, 4, NPER_MAX], # Total: - 4-or-more-person household: - No workers
["B08202_020E", 1, 1, 4, NPER_MAX], # Total: - 4-or-more-person household: - 1 worker
["B08202_021E", 2, 2, 4, NPER_MAX], # Total: - 4-or-more-person household: - 2 workers
["B08202_022E", 3, NWOR_MAX, 4, NPER_MAX], # Total: - 4-or-more-person household: - 3 or more workers
],
"B11016":[ # acs5, B11016. HOUSEHOLD TYPE BY HOUSEHOLD SIZE
# Universe: Households
["variable", "family", "pers_min", "pers_max"],
["B11016_001E", "All", 0, NPER_MAX], # Total
["B11016_002E", "Family", 0, NPER_MAX], # Family households
["B11016_003E", "Family", 2, 2], # Family households, 2-person household
["B11016_004E", "Family", 3, 3], # Family households, 3-person household
["B11016_005E", "Family", 4, 4], # Family households, 4-person household
["B11016_006E", "Family", 5, 5], # Family households, 5-person household
["B11016_007E", "Family", 6, 6], # Family households, 6-person household
["B11016_008E", "Family", 7, NPER_MAX], # Family households, 7-or-more person household
["B11016_009E", "Nonfamily", 0, NPER_MAX], # Nonfamily households
["B11016_010E", "Nonfamily", 1, 1], # Nonfamily households, 1-person household
["B11016_011E", "Nonfamily", 2, 2], # Nonfamily households, 2-person household
["B11016_012E", "Nonfamily", 3, 3], # Nonfamily households, 3-person household
["B11016_013E", "Nonfamily", 4, 4], # Nonfamily households, 4-person household
["B11016_014E", "Nonfamily", 5, 5], # Nonfamily households, 5-person household
["B11016_015E", "Nonfamily", 6, 6], # Nonfamily households, 6-person household
["B11016_016E", "Nonfamily", 7, NPER_MAX], # Nonfamily households, 7-or-more person household
],
"B19001":[ # acs5, B19001. HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2010 INFLATION-ADJUSTED DOLLARS):
# Universe: Households
# USE acs 2006-2010 https://api.census.gov/data/2010/acs5/variables.html for 2010 dollars
["variable", "hhinc_min", "hhinc_max"],
["B19001_001E", 0, HINC_MAX], # Households
["B19001_002E", 0, 9999], # Households Less than $10,000
["B19001_003E", 10000, 14999], # Households $10,000 to $14,999
["B19001_004E", 15000, 19999], # Households $15,000 to $19,999
["B19001_005E", 20000, 24999], # Households $20,000 to $24,999
["B19001_006E", 25000, 29999], # Households $25,000 to $29,999
["B19001_007E", 30000, 34999], # Households $30,000 to $34,999
["B19001_008E", 35000, 39999], # Households $35,000 to $39,999
["B19001_009E", 40000, 44999], # Households $40,000 to $44,999
["B19001_010E", 45000, 49999], # Households $45,000 to $49,999
["B19001_011E", 50000, 59999], # Households $50,000 to $59,999
["B19001_012E", 60000, 74999], # Households $60,000 to $74,999
["B19001_013E", 75000, 99999], # Households $75,000 to $99,999
["B19001_014E", 100000, 124999], # Households $100,000 to $124,999
["B19001_015E", 125000, 149999], # Households $125,000 to $149,999
["B19001_016E", 150000, 199999], # Households $150,000 to $199,999
["B19001_017E", 200000, HINC_MAX], # Households $200,000 or more
],
"C24010":[ # acs5, C24010. SEX BY OCCUPATION FOR THE CIVILIAN EMPLOYED POPULATION 16 YEARS AND OVER
# Universe: Civilian employed population 16 years and over
["variable", "sex", "occ_cat1", "occ_cat2", "occ_cat3" ],
["C24010_001E", "All", "All", "All", "All" ],
["C24010_002E", "Male", "All", "All", "All" ],
["C24010_003E", "Male", "Management, business, science, and arts", "All", "All" ],
["C24010_004E", "Male", "Management, business, science, and arts", "Management, business, and financial", "All" ],
["C24010_005E", "Male", "Management, business, science, and arts", "Management, business, and financial", "Management" ],
["C24010_006E", "Male", "Management, business, science, and arts", "Management, business, and financial", "Business and financial operations" ],
["C24010_007E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "All" ],
["C24010_008E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "Computer and mathematical" ],
["C24010_009E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "Architecture and engineering" ],
["C24010_010E", "Male", "Management, business, science, and arts", "Computer, engineering, and science", "Life, physical, and social science" ],
["C24010_011E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "All" ],
["C24010_012E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Community and social service" ],
["C24010_013E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Legal" ],
["C24010_014E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Education, training, and library" ],
["C24010_015E", "Male", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Arts, design, entertainment, sports, and media" ],
["C24010_016E", "Male", "Management, business, science, and arts", "Healthcare practitioners and technical", "All" ],
["C24010_017E", "Male", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health diagnosing and treating practitioners and other technical" ],
["C24010_018E", "Male", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health technologists and technicians" ],
["C24010_019E", "Male", "Service", "All", "All" ],
["C24010_020E", "Male", "Service", "Healthcare support", "All" ],
["C24010_021E", "Male", "Service", "Protective service", "All" ],
["C24010_022E", "Male", "Service", "Protective service", "Fire fighting and prevention, and other protective service workers"], # including supervisors
["C24010_023E", "Male", "Service", "Protective service", "Law enforcement workers" ], # including supervisors
["C24010_024E", "Male", "Service", "Food preparation and serving related", "All" ],
["C24010_025E", "Male", "Service", "Building and grounds cleaning and maintenance", "All" ],
["C24010_026E", "Male", "Service", "Personal care and service", "All" ],
["C24010_027E", "Male", "Sales and office", "All", "All" ],
["C24010_028E", "Male", "Sales and office", "Sales and related", "All" ],
["C24010_029E", "Male", "Sales and office", "Office and administrative support", "All" ],
["C24010_030E", "Male", "Natural resources, construction, and maintenance", "All", "All" ],
["C24010_031E", "Male", "Natural resources, construction, and maintenance", "Farming, fishing, and forestry", "All" ],
["C24010_032E", "Male", "Natural resources, construction, and maintenance", "Construction and extraction", "All" ],
["C24010_033E", "Male", "Natural resources, construction, and maintenance", "Installation, maintenance, and repair", "All" ],
["C24010_034E", "Male", "Production, transportation, and material moving", "All", "All" ],
["C24010_035E", "Male", "Production, transportation, and material moving", "Production", "All" ],
["C24010_036E", "Male", "Production, transportation, and material moving", "Transportation", "All" ],
["C24010_037E", "Male", "Production, transportation, and material moving", "Material moving", "All" ],
["C24010_038E", "Female", "All", "All", "All" ],
["C24010_039E", "Female", "Management, business, science, and arts", "All", "All" ],
["C24010_040E", "Female", "Management, business, science, and arts", "Management, business, and financial", "All" ],
["C24010_041E", "Female", "Management, business, science, and arts", "Management, business, and financial", "Management" ],
["C24010_042E", "Female", "Management, business, science, and arts", "Management, business, and financial", "Business and financial operations" ],
["C24010_043E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "All" ],
["C24010_044E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "Computer and mathematical" ],
["C24010_045E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "Architecture and engineering" ],
["C24010_046E", "Female", "Management, business, science, and arts", "Computer, engineering, and science", "Life, physical, and social science" ],
["C24010_047E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "All" ],
["C24010_048E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Community and social service" ],
["C24010_049E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Legal" ],
["C24010_050E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", "Education, training, and library" ],
["C24010_051E", "Female", "Management, business, science, and arts", "Education, legal, community service, arts, and media", " Arts, design, entertainment, sports, and media" ],
["C24010_052E", "Female", "Management, business, science, and arts", "Healthcare practitioners and technical", "All" ],
["C24010_053E", "Female", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health diagnosing and treating practitioners and other technical" ],
["C24010_054E", "Female", "Management, business, science, and arts", "Healthcare practitioners and technical", "Health technologists and technicians" ],
["C24010_055E", "Female", "Service", "All", "All" ],
["C24010_056E", "Female", "Service", "Healthcare support", "All" ],
["C24010_057E", "Female", "Service", "Protective service", "All" ],
["C24010_058E", "Female", "Service", "Protective service", "Fire fighting and prevention, and other protective service" ], # including supervisors
["C24010_059E", "Female", "Service", "Protective service", "Law enforcement workers" ], # including supervisors
["C24010_060E", "Female", "Service", "Food preparation and serving related", "All" ],
["C24010_061E", "Female", "Service", "Building and grounds cleaning and maintenance", "All" ],
["C24010_062E", "Female", "Service", "Personal care and service", "All" ],
["C24010_063E", "Female", "Sales and office", "All", "All" ],
["C24010_064E", "Female", "Sales and office", "Sales and related", "All" ],
["C24010_065E", "Female", "Sales and office", "Office and administrative support", "All" ],
["C24010_066E", "Female", "Natural resources, construction, and maintenance", "All", "All" ],
["C24010_067E", "Female", "Natural resources, construction, and maintenance", "Farming, fishing, and forestry", "All" ],
["C24010_068E", "Female", "Natural resources, construction, and maintenance", "Construction and extraction", "All" ],
["C24010_069E", "Female", "Natural resources, construction, and maintenance", "Installation, maintenance, and repair", "All" ],
["C24010_070E", "Female", "Production, transportation, and material moving", "All", "All" ],
["C24010_071E", "Female", "Production, transportation, and material moving", "Production", "All" ],
["C24010_072E", "Female", "Production, transportation, and material moving", "Transportation", "All" ],
["C24010_073E", "Female", "Production, transportation, and material moving", "Material moving", "All" ],
]
}
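# Note on reading CENSUS_DEFINITIONS: each data row is [census variable, attribute values...],
# with the attribute names given by the first (header) row of that table's definition.
# For example, in "B11016" the row ["B11016_003E", "Family", 2, 2] means that variable
# B11016_003E counts Family households with pers_min=2 and pers_max=2. get_census_data()
# below turns these rows into a pandas MultiIndex over the fetched columns, roughly:
#
#   pandas.MultiIndex.from_tuples([("B11016_003E", "Family", 2, 2)],
#                                 names=["variable", "family", "pers_min", "pers_max"])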
def __init__(self):
"""
Read the census api key and instantiate the census object.
"""
# read the census api key
with open(CensusFetcher.API_KEY_FILE) as f: self.CENSUS_API_KEY = f.read()
self.census = census.Census(self.CENSUS_API_KEY)
logging.debug("census object instantiated")
def get_census_data(self, dataset, year, table, geo):
"""
Dataset is one of "sf1" or "acs5"
Year is the data year for the table
Table is a key of CensusFetcher.CENSUS_DEFINITIONS (e.g. "B01001")
Geo is one of "block", "block group", "tract", "county subdivision" or "county"
"""
if dataset not in ["sf1","acs5"]:
raise ValueError("get_census_data only supports datasets 'sf1' and 'acs5'")
if geo not in ["block", "block group", "tract", "county subdivision", "county"]:
raise ValueError("get_census_data received unsupported geo {0}".format(geo))
if table not in CensusFetcher.CENSUS_DEFINITIONS.keys():
raise ValueError("get_census_data received unsupported table {0}".format(table))
table_cache_file = os.path.join(CensusFetcher.LOCAL_CACHE_FOLDER, "{0}_{1}_{2}_{3}.csv".format(dataset,year,table,geo))
logging.info("Checking for table cache at {0}".format(table_cache_file))
# lookup table definition
table_def = CensusFetcher.CENSUS_DEFINITIONS[table]
# logging.debug(table_def)
table_cols = table_def[0] # e.g. ['variable', 'pers_min', 'pers_max']
if geo=="block":
geo_index = ["state","county","tract","block"]
elif geo=="block group":
geo_index = ["state","county","tract","block group"]
elif geo=="tract":
geo_index = ["state","county","tract"]
elif geo=="county subdivision":
geo_index = ["state","county","county subdivision"]
elif geo=="county":
geo_index = ["state","county"]
# lookup cache and return, if it exists
if os.path.exists(table_cache_file):
logging.info("Reading {0}".format(table_cache_file))
dtypes_dict = {k:object for k in geo_index}
# This version doesn't make the index columns into strings
# full_df_v1 = pandas.read_csv(table_cache_file,
# header=range(len(table_cols)),
# index_col=range(len(geo_index)), dtype=dtypes_dict)
# we want the index columns as strings
# https://github.com/pandas-dev/pandas/issues/9435
full_df = pandas.read_csv(table_cache_file, dtype=dtypes_dict, skiprows=len(table_cols)).set_index(geo_index)
full_df_cols = pandas.read_csv(table_cache_file,
header=range(len(table_cols)),
index_col=range(len(geo_index)),nrows=0).columns
full_df.columns = full_df_cols
return full_df
multi_col_def = [] # we'll build this
full_df = None # and this
for census_col in table_def[1:]:
# census_col looks like ['H0130001', 1, 10]
# fetch for one county at a time
df = pandas.DataFrame()
# loop through counties (unless getting at county level)
county_codes = CensusFetcher.BAY_AREA_COUNTY_FIPS.values()
if geo=="county": county_codes = ["do_once"]
for county_code in county_codes:
if geo == "county":
geo_dict = {'for':'{0}:*'.format(geo), 'in':'state:{0}'.format(CensusFetcher.CA_STATE_FIPS)}
else:
geo_dict = {'for':'{0}:*'.format(geo),
'in':'state:{0} county:{1}'.format(CensusFetcher.CA_STATE_FIPS, county_code)}
if dataset == "sf1":
county_df = pandas.DataFrame.from_records(self.census.sf1.get(census_col[0], geo_dict, year=year)).set_index(geo_index)
elif dataset == "acs5":
county_df = pandas.DataFrame.from_records(self.census.acs5.get(census_col[0], geo_dict, year=year)).set_index(geo_index)
# force the data column to be a float -- sometimes they're objects which won't work
county_df = county_df.astype(float)
df = df.append(county_df)
# join with existing full_df
if len(multi_col_def) == 0:
full_df = df
else:
full_df = full_df.merge(df, left_index=True, right_index=True)
# note column defs
multi_col_def.append(census_col)
if geo=="county":
# if we fetched for county then we have all counties -- restrict to just the counties we care about
county_tuples = [(CensusFetcher.CA_STATE_FIPS, x) for x in CensusFetcher.BAY_AREA_COUNTY_FIPS.values()]
full_df = full_df.loc[county_tuples]
# logging.debug(full_df.head())
# now we have the table with multiple columns -- name the columns with decoded names
full_df.columns = pandas.MultiIndex.from_tuples(multi_col_def, names=table_cols)
# logging.debug(full_df.head())
# write it out
full_df.to_csv(table_cache_file, header=True, index=True)
logging.info("Wrote {0}".format(table_cache_file))
return full_df
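# Hypothetical usage sketch (the table, year and geography below are illustrative choices,
# not prescribed by this module):
#
#   fetcher = CensusFetcher()
#   # households by type and size at the block group level from the 2006-2010 ACS
#   b11016_df = fetcher.get_census_data(dataset="acs5", year=2010, table="B11016", geo="block group")
#   # columns form a MultiIndex named ("variable", "family", "pers_min", "pers_max")
#   print(b11016_df.head())
#
# The first call hits the Census API and writes the cache CSV; subsequent calls with the
# same (dataset, year, table, geo) read the cached file instead.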
def add_aggregate_geography_colums(table_df):
"""
Given a table with column GEOID_block, creates columns for GEOID_[county,tract,block group]
"""
if "GEOID_block" in table_df.columns:
table_df["GEOID_county" ] = table_df["GEOID_block"].str[:5 ]
table_df["GEOID_tract" ] = table_df["GEOID_block"].str[:11]
table_df["GEOID_block group"] = table_df["GEOID_block"].str[:12]
def census_col_is_in_control(param_dict, control_dict):
"""
param_dict is from CENSUS_DEFINITIONS, e.g. OrderedDict([('pers_min',4), ('pers_max', 4)])
control_dict is from control definitions, e.g. OrderedDict([('pers_min',4), ('pers_max',10)])
Checks if this census column should be included in the control.
Returns True or False.
"""
# assume true unless kicked out
for control_name, control_val in control_dict.iteritems():
if control_name not in param_dict:
continue # this census column doesn't carry this attribute; skip the check rather than KeyError below
# if the value is a string, require exact match
if isinstance(control_val, str):
if control_dict[control_name] != param_dict[control_name]:
return False
continue
# otherwise, check the min/max ranges
if control_name.endswith('_min') and param_dict[control_name] < control_dict[control_name]:
# census includes values less than control allows
return False
if control_name.endswith('_max') and param_dict[control_name] > control_dict[control_name]:
# census includes values greater than control allows
return False
return True
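# Illustration of the matching rule, using the ranges from the docstring:
#
#   param_dict   = collections.OrderedDict([("pers_min", 4), ("pers_max",  4)])  # census column: 4-person households
#   control_dict = collections.OrderedDict([("pers_min", 4), ("pers_max", 10)])  # control: 4-to-10-person households
#   census_col_is_in_control(param_dict, control_dict)   # -> True, 4..4 lies inside 4..10
#
#   control_dict = collections.OrderedDict([("pers_min", 5), ("pers_max", 10)])
#   census_col_is_in_control(param_dict, control_dict)   # -> False, census pers_min 4 < control pers_min 5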
def create_control_table(control_name, control_dict_list, census_table_name, census_table_df):
"""
Given a control list of ordered dictionary (e.g. [{"pers_min":1, "pers_max":NPER_MAX}]) for a specific control,
returns a version of the census table with just the relevant column.
"""
logging.info("Creating control table for {}".format(control_name))
logging.debug("\n{}".format(census_table_df.head()))
# construct a new dataframe to return with same index as census_table_df
control_df = pandas.DataFrame(index=census_table_df.index, columns=[control_name], data=0)
# logging.debug control_df.head()
# logging.debug(census_table_df.columns.names)
# [u'variable', u'pers_min', u'pers_max']
# logging.debug(census_table_df.columns.get_level_values(0))
# Index([u'H0130001', u'H0130002', u'H0130003', u'H0130004', u'H0130005', u'H0130006', u'H0130007', u'H0130008'], dtype='object', name=u'variable')
# logging.debug(census_table_df.columns.get_level_values(1))
# Index([u'1', u'1', u'2', u'3', u'4', u'5', u'6', u'7'], dtype='object', name=u'pers_min')
# logging.debug(census_table_df.columns.get_level_values(2))
# Index([u'10', u'1', u'2', u'3', u'4', u'5', u'6', u'10'], dtype='object', name=u'pers_max')
# the control_dict_list is a list of dictionaries -- iterate through them
prev_sum = 0
for control_dict in control_dict_list:
# if there's only one column and no attributes are expected then we're done
if len(control_dict) == 0 and len(census_table_df.columns.values) == 1:
variable_name = census_table_df.columns.values[0]
logging.info("No attributes specified; single column identified: {}".format(variable_name))
control_df[control_name] = census_table_df[variable_name]
else:
logging.info(" Control definition:")
for cname,cval in control_dict.iteritems(): logging.info(" {:15} {}".format(cname, cval))
# find the relevant column, if there is one
for colnum in range(len(census_table_df.columns.levels[0])):
param_dict = collections.OrderedDict()
# level 0 is the Census variable name, e.g. H0130001
variable_name = census_table_df.columns.get_level_values(0)[colnum]
for paramnum in range(1, len(census_table_df.columns.names)):
param = census_table_df.columns.names[paramnum]
try: # assume this is an int but fall back if it's nominal
param_dict[param] = int(census_table_df.columns.get_level_values(paramnum)[colnum])
except (ValueError, TypeError): # nominal (string) attribute, e.g. "Male"
param_dict[param] = census_table_df.columns.get_level_values(paramnum)[colnum]
# logging.debug(param_dict)
# Is this single column sufficient?
if param_dict == control_dict:
logging.info(" Found a single matching column: [{}]".format(variable_name))
for pname,pval in param_dict.iteritems(): logging.info(" {:15} {}".format(pname, pval))
control_df["temp"] = census_table_df[variable_name]
control_df[control_name] = census_table_df[variable_name]
control_df.drop(columns="temp", inplace=True)
break # stop iterating through columns
# Otherwise, if it's in the range, add it in
if census_col_is_in_control(param_dict, control_dict):
logging.info(" Adding column [{}]".format(variable_name))
for pname,pval in param_dict.iteritems(): logging.info(" {:15} {}".format(pname, pval))
control_df["temp"] = census_table_df[variable_name]
control_df[control_name] = control_df[control_name] + control_df["temp"]
control_df.drop(columns="temp", inplace=True)
# assume each control dict needs to find *something*
new_sum = control_df[control_name].sum()
logging.info(" => Total added: {:,}".format(new_sum - prev_sum))
assert( new_sum > prev_sum)
prev_sum = new_sum
return control_df
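# Hedged usage sketch (the control name and ranges are illustrative):
#
#   hh_size_4_df = create_control_table(
#       control_name="hh_size_4",
#       control_dict_list=[collections.OrderedDict([("pers_min", 4), ("pers_max", 4)])],
#       census_table_name="B11016",
#       census_table_df=b11016_df)   # as returned by CensusFetcher.get_census_data()
#
# The result keeps the census geography index of b11016_df and has the single column
# "hh_size_4", built by summing every census column whose attribute ranges fall inside
# the control definition (here B11016_005E and B11016_013E, the 4-person households).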
def match_control_to_geography(control_name, control_table_df, control_geography, census_geography,
maz_taz_def_df, temp_controls, full_region,
scale_numerator, scale_denominator, subtract_table):
"""
Given a control table in the given census geography, this method will transform the table to the appropriate
control geography and return it.
Pass full_region=False if this is a test subset so the control totals don't need to add up to the census table total.
Pass scale_numerator and scale_denominator to scale numbers by scale_numerator/scale_denominator, where those are temp tables.
Or pass subtract_table to subtract out a temp table.
"""
if control_geography not in ["MAZ","TAZ","COUNTY","REGION"]:
raise ValueError("match_control_to_geography passed unsupported control geography {}".format(control_geography))
if census_geography not in ["block","block group","tract","county subdivision","county"]:
raise ValueError("match_control_to_geography passed unsupported census geography {}".format(census_geography))
# to verify we kept the totals
variable_total = control_table_df[control_name].sum()
logging.debug("Variable_total: {:,}".format(variable_total))
GEO_HIERARCHY = { 'MAZ' :['block','MAZ','block group','tract','county subdivision','county'],
'TAZ' :['block', 'TAZ', 'tract','county subdivision','county'],
'COUNTY':['block', 'block group','tract','county subdivision','county','COUNTY'],
'REGION':['block', 'block group','tract','county subdivision','county','REGION']}
control_geo_index = GEO_HIERARCHY[control_geography].index(control_geography)
try:
census_geo_index = GEO_HIERARCHY[control_geography].index(census_geography)
except:
census_geo_index = -1
# consolidate geography columns
control_table_df.reset_index(drop=False, inplace=True)
if census_geography=="block":
control_table_df["GEOID_block"] = control_table_df["state"] + control_table_df["county"] + control_table_df["tract"] + control_table_df["block"]
elif census_geography=="block group":
control_table_df["GEOID_block group"] = control_table_df["state"] + control_table_df["county"] + control_table_df["tract"] + control_table_df["block group"]
elif census_geography=="tract":
control_table_df["GEOID_tract"] = control_table_df["state"] + control_table_df["county"] + control_table_df["tract"]
elif census_geography=="county":
control_table_df["GEOID_county"] = control_table_df["state"] + control_table_df["county"]
# drop the others
control_table_df = control_table_df[["GEOID_{}".format(census_geography), control_name]]
# if this is a temp, don't go further -- we'll use it later
if control_name.startswith("temp_"):
logging.info("Temporary Total for {} ({} rows) {:,}".format(control_name, len(control_table_df), control_table_df[control_name].sum()))
logging.debug("head:\n{}".format(control_table_df.head()))
if scale_numerator or scale_denominator:
scale_numerator_geometry = temp_controls[scale_numerator].columns[0]
scale_denominator_geometry = temp_controls[scale_denominator].columns[0]
# join to the one that's the same length
logging.debug("Temp with numerator {} denominator {}".format(scale_numerator, scale_denominator))
logging.debug(" {} has geometry {} and length {}".format(scale_numerator,
scale_numerator_geometry, len(temp_controls[scale_numerator])))
logging.debug(" Head:\n{}".format(temp_controls[scale_numerator].head()))
logging.debug(" {} has geometry {} and length {}".format(scale_denominator,
scale_denominator_geometry, len(temp_controls[scale_denominator])))
logging.debug(" Head:\n{}".format(temp_controls[scale_denominator].head()))
# one should match -- try denom
if len(temp_controls[scale_denominator]) == len(control_table_df):
control_table_df = pandas.merge(left=control_table_df, right=temp_controls[scale_denominator], how="left")
control_table_df["temp_fraction"] = control_table_df[control_name] / control_table_df[scale_denominator]
# if the denom is 0, warn and convert infinite fraction to zero
zero_denom_df = control_table_df.loc[control_table_df["temp_fraction"]==numpy.inf].copy()
if len(zero_denom_df) > 0:
logging.warn(" DROPPING Inf (sum {}):\n{}".format(zero_denom_df[control_name].sum(), str(zero_denom_df)))
control_table_df.loc[control_table_df["temp_fraction"]==numpy.inf, "temp_fraction"] = 0
logging.debug("Divided by {} temp_fraction mean:{} Head:\n{}".format(scale_denominator, control_table_df["temp_fraction"].mean(), control_table_df.head()))
# but return table at numerator geography
numerator_df = temp_controls[scale_numerator].copy()
add_aggregate_geography_colums(numerator_df)
control_table_df = pandas.merge(left=numerator_df, right=control_table_df, how="left")
logging.debug("Joined with num ({} rows) :\n{}".format(len(control_table_df), control_table_df.head()))
control_table_df[control_name] = control_table_df["temp_fraction"] * control_table_df[scale_numerator]
# keep only geometry column name and control
control_table_df = control_table_df[[scale_numerator_geometry, control_name]]
logging.debug("Final Total: {:,} ({} rows) Head:\n{}".format(control_table_df[control_name].sum(),
len(control_table_df), control_table_df.head()))
elif len(temp_controls[scale_numerator]) == len(control_table_df):
raise NotImplementedError("Temp scaling by numerator of same geography not implemented yet")
else:
raise ValueError("Temp scaling requires numerator or denominator geography to match")
return control_table_df
# if the census geography is smaller than the target geography, this is a simple aggregation
if census_geo_index >= 0 and census_geo_index < control_geo_index:
logging.info("Simple aggregation from {} to {}".format(census_geography, control_geography))
if scale_numerator and scale_denominator:
assert(len(temp_controls[scale_numerator ]) == len(control_table_df))
assert(len(temp_controls[scale_denominator]) == len(control_table_df))
logging.info(" Scaling by {}/{}".format(scale_numerator,scale_denominator))
control_table_df = pandas.merge(left=control_table_df, right=temp_controls[scale_numerator ], how="left")
control_table_df = pandas.merge(left=control_table_df, right=temp_controls[scale_denominator], how="left")
control_table_df[control_name] = control_table_df[control_name] * control_table_df[scale_numerator]/control_table_df[scale_denominator]
control_table_df.fillna(0, inplace=True)
variable_total = variable_total * temp_controls[scale_numerator][scale_numerator].sum()/temp_controls[scale_denominator][scale_denominator].sum()
if subtract_table:
assert(len(temp_controls[subtract_table]) == len(control_table_df))
logging.info(" Initial total {:,}".format(control_table_df[control_name].sum()))
logging.info(" Subtracting out {} with sum {:,}".format(subtract_table, temp_controls[subtract_table][subtract_table].sum()))
control_table_df = pandas.merge(left=control_table_df, right=temp_controls[subtract_table], how="left")
control_table_df[control_name] = control_table_df[control_name] - control_table_df[subtract_table]
variable_total = variable_total - temp_controls[subtract_table][subtract_table].sum()
# we really only need these columns - control geography and the census geography
geo_mapping_df = maz_taz_def_df[[control_geography, "GEOID_{}".format(census_geography)]].drop_duplicates()
control_table_df = pandas.merge(left=control_table_df, right=geo_mapping_df, how="left")
# aggregate now
final_df = control_table_df[[control_geography, control_name]].groupby(control_geography).aggregate(numpy.sum)
# verify the totals didn't change
logging.debug("total at the end: {:,}".format(final_df[control_name].sum()))
if full_region and not scale_numerator: assert(abs(final_df[control_name].sum() - variable_total) < 0.5)
logging.info(" => Total for {} {:,}".format(control_name, final_df[control_name].sum()))
return final_df
# the census geography is larger than the target geography => proportional scaling is required
# proportion = column / scale_denominator (these should be at the same geography)
# and then multiply by the scale_numerator (which should be at a smaller geography)
# e.g. hh_inc_15_prop = hh_inc_15 / temp_num_hh_bg (at block group)
# then multiply this by the households at the block level to get hh_inc_15 for blocks (these will be floats)
# and aggregate to control geo (e.g. TAZ)
if scale_numerator is None or scale_denominator is None:
msg = "Cannot go from larger census geography {} without numerator and denominator specified".format(census_geography)
logging.fatal(msg)
raise ValueError(msg)
logging.info("scale_numerator={} scale_denominator={}".format(scale_numerator, scale_denominator))
# verify the last one matches our geography
same_geo_total_df = temp_controls[scale_denominator]
assert(len(same_geo_total_df) == len(control_table_df))
proportion_df = pandas.merge(left=control_table_df, right=same_geo_total_df, how="left")
proportion_var = "{} proportion".format(control_name)
proportion_df[proportion_var] = proportion_df[control_name] / proportion_df[scale_denominator]
logging.info("Create proportion {} at {} geography via {} using {}/{}\n{}".format(
proportion_var, control_geography, census_geography,
control_name, scale_denominator, proportion_df.head()))
logging.info("Sums:\n{}".format(proportion_df[[control_name, scale_denominator]].sum()))
logging.info("Mean:\n{}".format(proportion_df[[proportion_var]].mean()))
# join this to the maz_taz_definition - it'll be the lowest level
block_prop_df = pandas.merge(left=maz_taz_def_df, right=proportion_df, how="left")
# this is the first temp table, our multiplier
block_total_df = temp_controls[scale_numerator]
block_prop_df = pandas.merge(left=block_prop_df, right=block_total_df, how="left")
from __future__ import division
from . import cholesky_errors, mahalanobis, VariogramFourthRoot
from . import pivoted_cholesky
import numpy as np
from numpy.linalg import solve, cholesky
from scipy.linalg import cho_solve
import scipy.stats as stats
from statsmodels.sandbox.distributions.mv_normal import MVT
import seaborn as sns
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
from itertools import cycle
__all__ = ['Diagnostic', 'GraphicalDiagnostic']
class Diagnostic:
R"""A class for quickly testing model checking methods discussed in Bastos & O'Hagan.
This class is under construction and the implementation may change in the future.
Parameters
----------
mean : array, shape = (n_samples,)
The mean
cov : array, shape = (n_samples, n_samples)
The covariance
df : int, optional
The degrees of freedom. Defaults to `None`, which treats the distribution as Gaussian
random_state : int, optional
The random state for the random number generator
"""
def __init__(self, mean, cov, df=None, random_state=1):
self.mean = mean
self.cov = cov
self.sd = sd = np.sqrt(np.diag(cov))
if df is None:
# TODO: Handle when cov is ill-conditioned so multivariate_normal fails.
self.dist = stats.multivariate_normal(mean=mean, cov=cov)
# try:
# self.dist = stats.multivariate_normal(mean=mean, cov=cov)
# except np.linalg.LinAlgError:
# self.dist = None
self.udist = stats.norm(loc=mean, scale=sd)
self.std_udist = stats.norm(loc=0., scale=1.)
else:
sigma = cov * (df - 2) / df
self.dist = MVT(mean=mean, sigma=sigma, df=df)
self.udist = stats.t(loc=mean, scale=sd, df=df)
self.std_udist = stats.t(loc=0., scale=1., df=df)
self.dist.random_state = random_state
self.udist.random_state = random_state
self.std_udist.random_state = random_state
self._chol = cholesky(self.cov)
self._pchol = pivoted_cholesky(self.cov)
e, v = np.linalg.eigh(self.cov)
# To match Bastos and O'Hagan definition
# i.e., eigenvalues ordered from largest to smallest
e, v = e[::-1], v[:, ::-1]
ee = np.diag(np.sqrt(e))
self._eig = v @ ee
def samples(self, n):
R"""Sample random variables
Parameters
----------
n : int
The number of curves to sample
Returns
-------
array, shape = (n_samples, n_curves)
"""
return self.dist.rvs(n).T
def individual_errors(self, y):
R"""Computes the scaled individual errors diagnostic
.. math::
D_I(y) = \frac{y-m}{\sigma}
Parameters
----------
y : array, shape = (n_samples, [n_curves])
Returns
-------
array : shape = (n_samples, [n_curves])
"""
return ((y.T - self.mean) / np.sqrt(np.diag(self.cov))).T
def cholesky_errors(self, y):
return cholesky_errors(y.T, self.mean, self._chol).T
def pivoted_cholesky_errors(self, y):
return solve(self._pchol, (y.T - self.mean).T)
def eigen_errors(self, y):
return solve(self._eig, (y.T - self.mean).T)
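# Note on the three error transforms above: each whitens the residual y - m with a matrix G
# for which G G^T equals (or, for the pivoted variant, approximates) the covariance -- the
# lower Cholesky factor, the pivoted Cholesky factor, or V sqrt(E) from the eigendecomposition.
# Under the model the transformed errors should then look like independent draws from
# std_udist. A rough self-check for the Gaussian case, assuming diag is a Diagnostic instance:
#
#   y = diag.samples(100)
#   np.std(diag.cholesky_errors(y))   # close to 1
#   np.std(diag.eigen_errors(y))      # close to 1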
def chi2(self, y):
R"""Computes the chi-squared statistic, i.e., the sum of squared individual errors"""
return np.sum(self.individual_errors(y) ** 2, axis=0)
def md_squared(self, y):
R"""Computes the squared Mahalanobis distance"""
return mahalanobis(y.T, self.mean, self._chol) ** 2
def kl(self, mean, cov):
R"""The Kullback-Leibler divergence between two multivariate normal distributions
.. math::
D_{KL}(N_0 | N_1) = \frac{1}{2} \left [
\mathrm{Tr}(\Sigma_1^{-1}\Sigma_0)
+ (\mu_1 - \mu_0)^T \Sigma_1^{-1} (\mu_1 - \mu_0)
- k + \log\left(\frac{\det \Sigma_1}{\det \Sigma_0}\right)
\right]
where :math:`k` is the dimension of Normal distributions. The :math:`\mu_1` and :math:`\Sigma_1` are those
fed during the initialization of the Diagnostic object, and :math:`\mu_0` and :math:`\Sigma_0` are the
arguments of this function.
Parameters
----------
mean : array, shape = (n_samples,)
cov : array, shape = (n_samples, n_samples)
Returns
-------
float
The KL divergence
"""
m1, c1, chol1 = self.mean, self.cov, self._chol
m0, c0 = mean, cov
tr = np.trace(cho_solve((chol1, True), c0))
dist = self.md_squared(m0)
k = c1.shape[-1]
logs = 2 * np.sum(np.log(np.diag(chol1))) - np.linalg.slogdet(c0)[-1]  # log det(Sigma_1) from its Cholesky factor
return 0.5 * (tr + dist - k + logs)
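# Quick sanity check (a sketch, not a test): the divergence of the reference distribution
# from itself is zero up to round-off, since the trace term gives k, the Mahalanobis term
# vanishes, and the log-determinant ratio is log(1) = 0:
#
#   diag = Diagnostic(mean=mean, cov=cov)
#   diag.kl(mean, cov)   # ~ 0.0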
def credible_interval(self, y, intervals):
"""The credible interval diagnostic.
Parameters
----------
y : (n_samples, [n_curves]) shaped array
intervals : 1d array
The credible intervals at which to perform the test
Returns
-------
array, shape = ([n_curves], n_intervals)
"""
lower, upper = self.udist.interval(np.atleast_2d(intervals).T)
def diagnostic(data_, lower_, upper_):
indicator = (lower_ < data_) & (data_ < upper_) # 1 if in, 0 if out
return np.average(indicator, axis=1) # The diagnostic
dci = np.apply_along_axis(
diagnostic, axis=1, arr=np.atleast_2d(y).T, lower_=lower, upper_=upper)
if y.ndim == 1:
dci = np.squeeze(dci) # If y didn't have n_curves dim, then remove it now.
return dci
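# Illustrative call (the interval values are just examples): for a Diagnostic instance diag
# and data y of shape (n_samples, n_curves),
#
#   dci = diag.credible_interval(y, intervals=np.array([0.68, 0.95]))
#
# returns one row per curve giving the fraction of its points inside each pointwise band;
# a curve consistent with the model yields values near [0.68, 0.95].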
@staticmethod
def variogram(X, y, bin_bounds):
R"""Computes the variogram for the data y at input points X.
Parameters
----------
X
y
bin_bounds
Returns
-------
v : array
bin_locations :
gamma :
lower :
upper :
"""
v = VariogramFourthRoot(X, y, bin_bounds)
bin_locations = v.bin_locations
gamma, lower, upper = v.compute(rt_scale=False)
return v, bin_locations, gamma, lower, upper
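# A minimal, self-contained usage sketch with toy inputs; the kernel, length scale and
# sizes below are illustrative assumptions, not taken from this module.
def _diagnostic_usage_sketch():
    """Rough end-to-end example of the Diagnostic class with made-up data (never called here)."""
    x = np.linspace(0, 1, 20)
    mean = np.zeros(20)
    # toy squared-exponential covariance with a small nugget for numerical stability
    cov = np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2 / 0.2 ** 2) + 1e-8 * np.eye(20)
    diag = Diagnostic(mean=mean, cov=cov)
    y = diag.samples(5)                                     # 5 reference curves, shape (20, 5)
    md = diag.md_squared(y)                                 # one squared Mahalanobis distance per curve
    dci = diag.credible_interval(y, np.array([0.68, 0.95]))
    return md, dci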
class GraphicalDiagnostic:
R"""A class for plotting diagnostics and their reference distributions.
This class is under construction and the implementation may change in the future.
Parameters
----------
data : array, shape = (n_samples, n_curves)
The data to compute diagnostics against
mean : array
The mean for the diagnostic object
cov : array
The covariance of the diagnostic object
df : int, optional
If a Student-t distribution, then this is the degrees of freedom. If `None`, it is
treated as Gaussian
random_state : int, optional
nref : int
The number of samples to use in computing a reference distribution by simulation
colors : list
The colors to use for each curve
markers : list
The markers to use for each curve, where applicable.
Examples
--------
"""
# See: https://ianstormtaylor.com/design-tip-never-use-black/
# soft_black = '#262626'
def __init__(self, data, mean, cov, df=None, random_state=1, nref=1000, colors=None, markers=None, labels=None,
gray='lightgray', black='#262626', markeredgecolors=None, markerfillstyles=None):
self.diagnostic = Diagnostic(mean=mean, cov=cov, df=df, random_state=random_state)
if data.ndim == 1:
data = np.atleast_2d(data).T # Add n_curves dim if it doesn't exist
self.data = data
self.samples = self.diagnostic.samples(nref)
prop_list = list(mpl.rcParams['axes.prop_cycle'])
if colors is None:
# The standard Matplotlib 2.0 colors, or whatever they've been updated to be.
colors = [c['color'] for c in prop_list]
if markers is None:
markers = ['o' for c in prop_list]
if markeredgecolors is None:
markeredgecolors = [None for c in prop_list]
if markerfillstyles is None:
markerfillstyles = ['full' for c in prop_list]
if labels is None:
labels = np.array([r'$c_{{{}}}$'.format(i) for i in range(data.shape[-1])])
self.labels = labels
self.markers = markers
self.markeredgecolors = markeredgecolors
self.markerfillstyles = markerfillstyles
self.marker_cycle = cycler('marker', markers)
self.colors = colors
self.color_cycle = cycler('color', colors)
self.gray = gray
self.black = black
n = len(cov)
if df is None:
self.md_ref_dist = stats.chi2(df=n)
else:
self.md_ref_dist = stats.f(dfn=n, dfd=df, scale=(df-2)*n/df)
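# Reference distribution note: for a Gaussian model (df=None) the squared Mahalanobis
# distance of an n-point curve follows a chi^2 distribution with n degrees of freedom;
# for the Student-t model it follows the scaled F distribution built above. A rough
# simulation check:
#
#   md_sim = self.diagnostic.md_squared(self.samples)
#   np.mean(md_sim)    # ~ self.md_ref_dist.mean() (equal to n in the Gaussian case)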
def error_plot(self, err, title=None, xlabel='Index', ylabel=None, ax=None):
if ax is None:
ax = plt.gca()
ax.axhline(0, 0, 1, linestyle='-', color=self.black, lw=1, zorder=0)
# The standardized 2 sigma bands since the sd has been divided out.
sd = self.diagnostic.std_udist.std()
ax.axhline(-2 * sd, 0, 1, color=self.gray, zorder=0, lw=1)
ax.axhline(2 * sd, 0, 1, color=self.gray, zorder=0, lw=1)
index = np.arange(1, self.data.shape[0]+1)
size = 8
if err.ndim == 1:
err = err[:, None]
for i, error in enumerate(err.T):
ax.plot(
index, error, ls='', color=self.colors[i],
marker=self.markers[i], markeredgecolor=self.markeredgecolors[i],
fillstyle=self.markerfillstyles[i], markersize=size, markeredgewidth=0.5
)
# ax.scatter(
# index, error, color=self.colors[i], marker=self.markers[i],
# edgecolor=self.markeredgecolors[i], linestyle=self.markerlinestyles[i]
# )
from matplotlib.ticker import MaxNLocator
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel(xlabel)
ax.margins(x=0.05)
ax.set_ylabel(ylabel)
ax.set_title(title)
return ax
def individual_errors(self, title='Individual Errors', ax=None):
err = self.diagnostic.individual_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def individual_errors_qq(self, title='Individual QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.individual_errors,
title=title, ax=ax)
def cholesky_errors(self, title='Cholesky Errors', ax=None):
err = self.diagnostic.cholesky_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def cholesky_errors_qq(self, title='Cholesky QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.cholesky_errors,
title=title, ax=ax)
def pivoted_cholesky_errors(self, title='Pivoted Cholesky Errors', ax=None):
err = self.diagnostic.pivoted_cholesky_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def pivoted_cholesky_errors_qq(self, title='Pivoted Cholesky QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.pivoted_cholesky_errors,
title=title, ax=ax)
def eigen_errors(self, title='Eigen Errors', ax=None):
err = self.diagnostic.eigen_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def eigen_errors_qq(self, title='Eigen QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.eigen_errors,
title=title, ax=ax)
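    # Each *_errors method standardizes the data under a different decomposition of the covariance
    # (element-wise, Cholesky, pivoted Cholesky, eigendecomposition); the *_qq variants compare the
    # resulting errors to 68% and 95% bands built from the simulated reference samples.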
def hist(self, data, ref, title=None, xlabel=None, ylabel=None, vlines=True, ax=None):
        if ax is None:
            ax = plt.gca()
        if hasattr(ref, 'ppf'):
            lower_95 = ref.ppf(0.025)
            upper_95 = ref.ppf(0.975)
            x = np.linspace(lower_95, upper_95, 100)
            ax.plot(x, ref.pdf(x), label='ref', color=self.black)
        else:
            ref_stats = stats.describe(ref)
            ref_sd = np.sqrt(ref_stats.variance)
            ref_mean = ref_stats.mean
            # Approximate 95% band; this does not exactly match the distribution's quantiles.
            lower_95 = ref_mean - 2 * ref_sd
            upper_95 = ref_mean + 2 * ref_sd
            ax.hist(ref, density=1, label='ref', histtype='step', color=self.black)
ax.axvline(lower_95, 0, 1, color='gray', linestyle='--', label=r'$2\sigma$')
ax.axvline(upper_95, 0, 1, color='gray', linestyle='--')
if vlines:
for c, d in zip(cycle(self.color_cycle), np.atleast_1d(data)):
ax.axvline(d, 0, 1, zorder=50, **c)
else:
ax.hist(data, density=1, label='data', histtype='step')
ax.legend()
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def violin(self, data, ref, title=None, xlabel=None, ylabel=None, ax=None):
if ax is None:
ax = plt.gca()
n = len(data)
nref = len(ref)
orders = np.arange(n)
zero = np.zeros(len(data), dtype=int)
nans = np.nan * np.ones(nref)
fake = np.hstack((np.ones(nref, dtype=bool), np.zeros(nref, dtype=bool)))
fake_ref = np.hstack((fake[:, None], np.hstack((ref, nans))[:, None]))
label = 'label_' # Placeholder
ref_df = pd.DataFrame(fake_ref, columns=['fake', label])
tidy_data = np.hstack((orders[:, None], data[:, None]))
data_df = pd.DataFrame(tidy_data, columns=['orders', label])
sns.violinplot(x=np.zeros(2 * nref, dtype=int), y=label, data=ref_df,
color=self.gray, hue='fake', split=True, inner='box', ax=ax)
with sns.color_palette(self.colors):
sns.swarmplot(x=zero, y=label, data=data_df, hue='orders', ax=ax)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.set_xlim(-0.05, 0.5)
return ax
def box(self, data, ref, title=None, xlabel=None, ylabel=None, trim=True, size=8, legend=False, ax=None):
if ax is None:
ax = plt.gca()
label = 'labelll' # Placeholder
# Plot reference dist
if hasattr(ref, 'ppf'):
gray = 'gray'
boxartist = self._dist_boxplot(
ref, ax=ax, positions=[0],
patch_artist=True,
widths=0.8)
for box in boxartist['boxes']:
box.update(dict(facecolor='lightgrey', edgecolor=gray))
for whisk in boxartist["whiskers"]:
whisk.update(dict(color=gray))
for cap in boxartist["caps"]:
cap.update(dict(color=gray))
for med in boxartist["medians"]:
med.update(dict(color=gray))
else:
nref = len(ref)
            ref_df = pd.DataFrame(ref, columns=[label])
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This test requires a running CAS server. You must use an ~/.authinfo
# file to specify your username and password. The CAS host and port must
# be specified using the CASHOST and CASPORT environment variables.
# A specific protocol ('cas', 'http', 'https', or 'auto') can be set using
# the CASPROTOCOL environment variable.
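#
# For example (host, port, and protocol values below are placeholders, not project defaults):
#   export CASHOST=cas-server.example.com
#   export CASPORT=5570
#   export CASPROTOCOL=cas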
import copy
import datetime
import numpy as np
import pandas as pd
import os
import six
import swat
import swat.utils.testing as tm
import sys
import time
import unittest
from swat.cas.datamsghandlers import *
# Pick sort keys that will match across SAS and Pandas sorting orders
SORT_KEYS = ['Origin', 'MSRP', 'Horsepower', 'Model']
USER, PASSWD = tm.get_user_pass()
HOST, PORT, PROTOCOL = tm.get_host_port_proto()
class TestDataMsgHandlers(tm.TestCase):
# Create a class attribute to hold the cas host type
server_type = None
def setUp(self):
swat.reset_option()
swat.options.cas.print_messages = False
swat.options.interactive_mode = False
swat.options.cas.missing.int64 = -999999
self.s = swat.CAS(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)
if self.s._protocol in ['http', 'https']:
tm.TestCase.skipTest(self, 'REST does not support data messages')
if type(self).server_type is None:
# Set once per class and have every test use it. No need to change between tests.
type(self).server_type = tm.get_cas_host_type(self.s)
self.srcLib = tm.get_casout_lib(self.server_type)
r = tm.load_data(self.s, 'datasources/cars_single.sashdat', self.server_type)
self.tablename = r['tableName']
self.assertNotEqual(self.tablename, None)
self.table = r['casTable']
def tearDown(self):
# tear down tests
try:
self.s.endsession()
except swat.SWATError:
pass
del self.s
swat.reset_option()
def test_csv(self):
import swat.tests as st
myFile = os.path.join(os.path.dirname(st.__file__), 'datasources', 'cars.csv')
cars = pd.io.parsers.read_csv(myFile)
dmh = swat.datamsghandlers.CSV(myFile, nrecs=20)
# Use the default caslib. Get it from the results, and use it in later actions.
out = self.s.addtable(table='cars', **dmh.args.addtable)
srcLib = out['caslib']
out = self.s.tableinfo(caslib=srcLib, table='cars')
data = out['TableInfo']
self.assertEqual(data.ix[:,'Name'][0], 'CARS')
self.assertEqual(data.ix[:,'Rows'][0], 428)
self.assertEqual(data.ix[:,'Columns'][0], 15)
out = self.s.columninfo(table=self.s.CASTable('cars', caslib=srcLib))
data = out['ColumnInfo']
self.assertEqual(len(data), 15)
self.assertEqual(data.ix[:,'Column'].tolist(), 'Make,Model,Type,Origin,DriveTrain,MSRP,Invoice,EngineSize,Cylinders,Horsepower,MPG_City,MPG_Highway,Weight,Wheelbase,Length'.split(','))
self.assertEqual(data.ix[:,'Type'].tolist(), ['varchar', 'varchar', 'varchar', 'varchar', 'varchar', 'int64', 'int64', 'double', 'int64', 'int64', 'int64', 'int64', 'int64', 'int64', 'int64'])
self.assertTablesEqual(cars, self.s.CASTable('cars', caslib=srcLib), sortby=SORT_KEYS)
self.s.droptable(caslib=srcLib, table='cars')
def test_dataframe(self):
# Boolean
s_bool_ = pd.Series([True, False], dtype=np.bool_)
s_bool8 = pd.Series([True, False], dtype=np.bool8)
# Integers
s_byte = pd.Series([100, 999], dtype=np.byte)
s_short = pd.Series([100, 999], dtype=np.short)
s_intc = pd.Series([100, 999], dtype=np.intc)
s_int_ = pd.Series([100, 999], dtype=np.int_)
s_longlong = pd.Series([100, 999], dtype=np.longlong)
s_intp = pd.Series([100, 999], dtype=np.intp)
s_int8 = pd.Series([100, 999], dtype=np.int8)
s_int16 = pd.Series([100, 999], dtype=np.int16)
s_int32 = pd.Series([100, 999], dtype=np.int32)
s_int64 = pd.Series([100, 999], dtype=np.int64)
# Unsigned integers
        s_ubyte = pd.Series([100, 999], dtype=np.ubyte)
import sys
import pandas as pd
import sqlalchemy
#import sqlalchemy.orm
from context import config
import solr
## MySQL setup
mysql_engine = sqlalchemy.create_engine(
'mysql://%s:%s@localhost/%s?unix_socket=%s&charset=%s' %
(config.MYSQL_USER,
config.MYSQL_PASS,
config.MYSQL_DB,
config.MYSQL_SOCK,
'utf8mb4'),
convert_unicode=True)
## SOLR setup
sobj = solr.Solr()
sobj.setSolrURL('%s/select' % config.SOLR_ADDR)
## Get article IDs
am_id_df = pd.read_sql('SELECT db_id FROM article_metadata', con=mysql_engine)
# --------------
import pandas as pd
from sklearn import preprocessing
#path : File path
# Code starts here
# read the dataset
dataset = pd.read_csv(path)
import pandas as pd
import numpy as np
from .uvc_model import calc_phi_total
def summary_tables_maker_uvc(material_definition, x_file_paths, data, peeq='sat'):
""" Prints to screen the summary tables for the material optimization in LaTeX format for the updated VC model.
:param dict material_definition: Contains information about each material.
:param list x_file_paths: (str) Path for the files that contain the x values for each material.
:param list data: (list, pd.DataFrame) The test data used for calibration of each of the materials.
:param str or float peeq: If 'sat' then calculates the metrics at model saturation, otherwise a finite equivalent
plastic strain.
:return list: The first and second summary tables.
Notes:
- material_definition:
'material_id': (list, str) Identifier for each material.
'load_protocols': (list, str) Labels of the load protocols used, see [1] for definitions.
- The metrics in Table 2 are defined in [2].
- If a finite peeq is provided, the metrics are calculated assuming that peeq increases monotonically
to the provided value.
References:
[1] de Castro e Sousa and Lignos (2017), On the inverse problem of classic nonlinear plasticity models.
[2] de Castro e Sousa and Lignos (2018), Constrained optimization in metal plasticity inverse problems.
"""
# Output column labels
parameter_labels = [r'$E$[GPa]', r'$\sigma_{y,0}$[MPa]', r'$Q_\infty$[MPa]', r'$b$',
r'$D_\infty$[MPa]', r'$a$',
r'$C_1$[MPa]', r'$\gamma_1$', r'$C_2$[MPa]', r'$\gamma_2$',
r'$C_3$[MPa]', r'$\gamma_3$', r'$C_4$[MPa]', r'$\gamma_4$']
metric_labels = [r'$\sigma_{y,0}$[MPa]', r'$\sigma_{sat}$[MPa]', r'$\sigma_{hard}$[MPa]',
r'$\rho^{sat}_{yield}$', r'$\rho^{sat}_{iso}$', r'$\rho^{sat}_{kin}$', r'$\rho^{sat}_{D}$']
n_basic_param = 6
tab_1, tab_2 = _table_maker(material_definition, x_file_paths, data, parameter_labels, metric_labels,
n_basic_param, calc_upd_metrics=True, peeq=peeq)
return [tab_1, tab_2]
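# A minimal usage sketch for the function above (the material id, load protocol label, and file
# paths below are illustrative placeholders only):
#
#   material_definition = {'material_id': ['Mat-1'], 'load_protocols': ['LP1']}
#   data = [pd.read_csv('mat1_test_data.csv')]
#   tab_1, tab_2 = summary_tables_maker_uvc(material_definition, ['x_mat1.txt'], data)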
def summary_tables_maker_vc(material_definition, x_file_paths, data, peeq='sat'):
""" Prints to screen the summary tables for the material optimization in LaTeX format for the original VC model.
:param dict material_definition: Contains information about each material.
:param list x_file_paths: (str) Path for the files that contain the x values for each material.
:param list data: (list, pd.DataFrame) The test data used for calibration of each of the materials.
:param str or float peeq: If 'sat' then calculates the metrics at model saturation, otherwise a finite equivalent
plastic strain.
:return list: The first and second summary tables.
Notes:
- material_definition:
'material_id': (list, str) Identifier for each material.
'load_protocols': (list, str) Labels of the load protocols used, see [1] for definitions.
- The metrics in Table 2 are defined in [2].
- If a finite peeq is provided, the metrics are calculated assuming that peeq increases monotonically
to the provided value.
References:
[1] de Castro e Sousa and Lignos (2017), On the inverse problem of classic nonlinear plasticity models.
[2] de Castro e Sousa and Lignos (2018), Constrained optimization in metal plasticity inverse problems.
"""
# Output column labels
parameter_labels = [r'$E$[GPa]', r'$\sigma_{y,0}$[MPa]', r'$Q_\infty$[MPa]', r'$b$',
r'$C_1$[MPa]', r'$\gamma_1$', r'$C_2$[MPa]', r'$\gamma_2$',
r'$C_3$[MPa]', r'$\gamma_3$', r'$C_4$[MPa]', r'$\gamma_4$']
metric_labels = [r'$\sigma_{y,0}$[MPa]', r'$\sigma_{sat}$[MPa]', r'$\sigma_{hard}$[MPa]',
r'$\rho^{sat}_{yield}$', r'$\rho^{sat}_{iso}$', r'$\rho^{sat}_{kin}$']
n_basic_param = 4
tab_1, tab_2 = _table_maker(material_definition, x_file_paths, data, parameter_labels, metric_labels,
n_basic_param, calc_upd_metrics=False, peeq=peeq)
return [tab_1, tab_2]
def _table_maker(material_definition, x_file_paths, data, parameter_labels, metric_labels, num_basic_param,
calc_upd_metrics, peeq='sat'):
""" Base function to generate the tables. """
# Set some options for the display
pd.set_option('display.max_columns', 12)
pd.set_option('display.width', 300)
pd.set_option('display.float_format', '{:0.2f}'.format)
# Extract the properties from the definition
material_id = material_definition['material_id']
load_protocols = material_definition['load_protocols']
# Make the first table
phi_values = []
summary_table = pd.DataFrame()
for i, f in enumerate(x_file_paths):
x = pd.read_csv(f, delimiter=' ')
x = np.array(x.iloc[-1])
# Sort the backstresses so that the largest gamma value is first
gammas = x[num_basic_param + 1::2]
ind = np.flipud(np.argsort(gammas))
# Exchange the gammas
x[num_basic_param + 1::2] = x[2 * ind + num_basic_param + 1]
# Exchange the Cs
x[num_basic_param::2] = x[2 * ind + num_basic_param]
        temp_table = pd.DataFrame(x, columns=(material_id[i],))
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
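    # Minimal usage sketch (the CSV path and section name below are placeholders):
    #   meta = TransformMetaData(inputFileName='time-series.csv', debug=True,
    #                            transform=True, sectionName='drive_stats')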
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            # Check for string-typed columns (isinstance(dtype, str) would always be False).
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean using a multiplication method, since direct division can produce an infinity or NaN.
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation using a multiplication method, since direct division can produce an infinity or NaN.
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
            if (columnName is not None):
                meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            # dataAnalysisCleaned has not been assigned yet, so filter the input frame directly.
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": pandas.StringDtype(),
"cleanBands": pandas.StringDtype(),
"avgTprog": pandas.StringDtype(),
"avgEraseCount": pandas.StringDtype(),
"edtcHandledBandCnt": pandas.StringDtype(),
"bandReloForNLBA": pandas.StringDtype(),
"bandCrossingDuringPliCount": pandas.StringDtype(),
"bitErrBucketNum": pandas.StringDtype(),
"sramCorrectablesTotal": pandas.StringDtype(),
"l1SramCorrErrCnt": pandas.StringDtype(),
"l2SramCorrErrCnt": pandas.StringDtype(),
"parityErrorValue": pandas.StringDtype(),
"parityErrorType": pandas.StringDtype(),
"mrr_LutValidDataSize": pandas.StringDtype(),
"pageProvisionalDefects": pandas.StringDtype(),
"plisWithErasesInProgress": pandas.StringDtype(),
"lastReplayDebug": pandas.StringDtype(),
"externalPreReadFatals": pandas.StringDtype(),
"hostReadCmd": pandas.StringDtype(),
"hostWriteCmd": pandas.StringDtype(),
"trimmedSectors": pandas.StringDtype(),
"trimTokens": pandas.StringDtype(),
"mrrEventsInCodewords": pandas.StringDtype(),
"mrrEventsInSectors": pandas.StringDtype(),
"powerOnMicroseconds": pandas.StringDtype(),
"mrrInXorRecEvents": pandas.StringDtype(),
"mrrFailInXorRecEvents": pandas.StringDtype(),
"mrrUpperpageEvents": pandas.StringDtype(),
"mrrLowerpageEvents": pandas.StringDtype(),
"mrrSlcpageEvents": pandas.StringDtype(),
"mrrReReadTotal": pandas.StringDtype(),
"powerOnResets": pandas.StringDtype(),
"powerOnMinutes": pandas.StringDtype(),
"throttleOnMilliseconds": pandas.StringDtype(),
"ctxTailMagic": pandas.StringDtype(),
"contextDropCount": pandas.StringDtype(),
"lastCtxSequenceId": pandas.StringDtype(),
"currCtxSequenceId": pandas.StringDtype(),
"mbliEraseCount": pandas.StringDtype(),
"pageAverageProgramCount": pandas.StringDtype(),
"bandAverageEraseCount": pandas.StringDtype(),
"bandTotalEraseCount": pandas.StringDtype(),
"bandReloForXorRebuildFail": pandas.StringDtype(),
"defragSpeculativeMiss": pandas.StringDtype(),
"uncorrectableBackgroundScan": pandas.StringDtype(),
"BitErrorsHost57": pandas.StringDtype(),
"BitErrorsHost58": pandas.StringDtype(),
"BitErrorsHost59": pandas.StringDtype(),
"BitErrorsHost60": pandas.StringDtype(),
"BitErrorsHost61": pandas.StringDtype(),
"BitErrorsHost62": pandas.StringDtype(),
"BitErrorsHost63": pandas.StringDtype(),
"BitErrorsHost64":
|
pandas.StringDtype()
|
pandas.StringDtype
|
# -*- coding: utf-8 -*-
"""Test losses"""
import datetime
import numpy as np
import pandas as pd
from conftest import assert_series_equal
from pvlib.soiling import hsu, kimber
from pvlib.iotools import read_tmy3
from conftest import requires_scipy, DATA_DIR
import pytest
@pytest.fixture
def expected_output():
# Sample output (calculated manually)
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
expected_no_cleaning = pd.Series(
data=[0.96998483, 0.94623958, 0.92468139, 0.90465654, 0.88589707,
0.86826366, 0.85167258, 0.83606715, 0.82140458, 0.80764919,
0.79476875, 0.78273241, 0.77150951, 0.76106905, 0.75137932,
0.74240789, 0.73412165, 0.72648695, 0.71946981, 0.7130361,
0.70715176, 0.70178307, 0.69689677, 0.69246034],
index=dt)
return expected_no_cleaning
@pytest.fixture
def expected_output_1():
dt = pd.date_range(start=pd.Timestamp(2019, 1, 1, 0, 0, 0),
                       end=pd.Timestamp(2019, 1, 1, 23, 59, 0), freq='1h')
#----------------------------------------------------------------------------------------------
####################
# IMPORT LIBRARIES #
####################
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import os
import altair as alt
import statsmodels.api as sm
from scipy import stats
from sklearn.metrics import make_scorer, mean_squared_error, r2_score, mean_absolute_error, explained_variance_score, roc_auc_score, max_error, log_loss, average_precision_score, precision_recall_curve, auc, roc_curve, confusion_matrix, recall_score, precision_score, f1_score, accuracy_score, balanced_accuracy_score, cohen_kappa_score
from sklearn.model_selection import train_test_split
import scipy
import sys
import platform
import base64
from io import BytesIO
from linearmodels import PanelOLS
from linearmodels import RandomEffects
from linearmodels import PooledOLS
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
st.legacy_caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
sys.tracebacklimit = 0
# Show altair tooltip when full screen
st.markdown('<style>#vg-tooltip-element{z-index: 1000051}</style>',unsafe_allow_html=True)
# workaround for Firefox bug- hide the scrollbar while keeping the scrolling functionality
st.markdown("""
<style>
.ReactVirtualized__Grid::-webkit-scrollbar {
display: none;
}
.ReactVirtualized__Grid {
-ms-overflow-style: none; /* IE and Edge */
scrollbar-width: none; /* Firefox */
}
</style>
""", unsafe_allow_html=True)
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# RESET INPUT
#Session state
if 'key' not in st.session_state:
st.session_state['key'] = 0
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
st.session_state['key'] = st.session_state['key'] + 1
st.sidebar.markdown("")
#++++++++++++++++++++++++++++++++++++++++++++
# DATA IMPORT
# File upload section
df_dec = st.sidebar.radio("Get data", ["Use example dataset", "Upload data"], key = st.session_state['key'])
uploaded_data=None
if df_dec == "Upload data":
#st.subheader("Upload your data")
#uploaded_data = st.sidebar.file_uploader("Make sure that dot (.) is a decimal separator!", type=["csv", "txt"])
separator_expander=st.sidebar.expander('Upload settings')
with separator_expander:
a4,a5=st.columns(2)
with a4:
dec_sep=a4.selectbox("Decimal sep.",['.',','], key = st.session_state['key'])
with a5:
col_sep=a5.selectbox("Column sep.",[';', ',' , '|', '\s+', '\t','other'], key = st.session_state['key'])
if col_sep=='other':
col_sep=st.text_input('Specify your column separator', key = st.session_state['key'])
a4,a5=st.columns(2)
with a4:
thousands_sep=a4.selectbox("Thousands x sep.",[None,'.', ' ','\s+', 'other'], key = st.session_state['key'])
if thousands_sep=='other':
thousands_sep=st.text_input('Specify your thousands separator', key = st.session_state['key'])
with a5:
encoding_val=a5.selectbox("Encoding",[None,'utf_8','utf_8_sig','utf_16_le','cp1140','cp1250','cp1251','cp1252','cp1253','cp1254','other'], key = st.session_state['key'])
if encoding_val=='other':
encoding_val=st.text_input('Specify your encoding', key = st.session_state['key'])
# Error handling for separator selection:
if dec_sep==col_sep:
st.sidebar.error("Decimal and column separators cannot be identical!")
elif dec_sep==thousands_sep:
st.sidebar.error("Decimal and thousands separators cannot be identical!")
elif col_sep==thousands_sep:
st.sidebar.error("Column and thousands separators cannot be identical!")
uploaded_data = st.sidebar.file_uploader("Default separators: decimal '.' | column ';'", type=["csv", "txt"])
if uploaded_data is not None:
df = pd.read_csv(uploaded_data, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
df_name=os.path.splitext(uploaded_data.name)[0]
st.sidebar.success('Loading data... done!')
elif uploaded_data is None:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
else:
df = pd.read_csv("default data/Grunfeld.csv", sep = ";|,|\t",engine='python')
df_name="Grunfeld"
st.sidebar.markdown("")
#Basic data info
n_rows = df.shape[0]
n_cols = df.shape[1]
#++++++++++++++++++++++++++++++++++++++++++++
# SETTINGS
settings_expander=st.sidebar.expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=int(st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4, key = st.session_state['key']))
st.caption("**Help**")
sett_hints = st.checkbox('Show learning hints', value=False, key = st.session_state['key'])
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False, key = st.session_state['key'])
sett_theme = st.selectbox('Theme', ["Light", "Dark"], key = st.session_state['key'])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# DATA PREPROCESSING & VISUALIZATION
st.header("**Panel data**")
st.markdown("Get your data ready for powerfull methods! Let STATY do data cleaning, variable transformations, visualizations and deliver you the stats you need. Specify your data processing preferences and start exploring your data stories right below... ")
# Check if enough data is available
if n_cols >= 2 and n_rows > 0:
st.empty()
else:
st.error("ERROR: Not enough data!")
return
# Specify entity and time
st.markdown("**Panel data specification**")
col1, col2 = st.columns(2)
with col1:
entity_na_warn = False
entity_options = df.columns
entity = st.selectbox("Select variable for entity", entity_options, key = st.session_state['key'])
with col2:
time_na_warn = False
time_options = df.columns
time_options = list(time_options[time_options.isin(df.drop(entity, axis = 1).columns)])
time = st.selectbox("Select variable for time", time_options, key = st.session_state['key'])
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "ERROR: The variable selected for entity has NAs!"
st.error(entity_na_warn)
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "ERROR: The variable selected for time has NAs!"
st.error(time_na_warn)
if df[time].dtypes != "float64" and df[time].dtypes != "float32" and df[time].dtypes != "int64" and df[time].dtypes != "int32":
time_na_warn = "ERROR: Time variable must be numeric!"
st.error(time_na_warn)
run_models = False
if time_na_warn == False and entity_na_warn == False:
data_empty_container = st.container()
with data_empty_container:
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
st.empty()
# Make sure time is numeric
df[time] = pd.to_numeric(df[time])
data_exploration_container2 = st.container()
with data_exploration_container2:
st.header("**Data screening and processing**")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA SUMMARY
# Main panel for data summary (pre)
#----------------------------------
dev_expander_dsPre = st.expander("Explore raw panel data info and stats", expanded = False)
st.empty()
with dev_expander_dsPre:
# Default data description:
if uploaded_data == None:
if st.checkbox("Show data description", value = False, key = st.session_state['key']):
st.markdown("**Data source:**")
st.markdown("This is the original 11-firm data set from Grunfeld’s Ph.D. thesis (*Grunfeld, 1958, The Determinants of Corporate Investment, Department of Economics, University of Chicago*). For more details see online complements for the article [The Grunfeld Data at 50] (https://www.zeileis.org/grunfeld/).")
st.markdown("**Citation:**")
st.markdown("<NAME>, <NAME> (2010). “The Grunfeld Data at 50,” German Economic Review, 11(4), 404-417. [doi:10.1111/j.1468-0475.2010.00513.x] (https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1468-0475.2010.00513.x)")
st.markdown("**Variables in the dataset:**")
col1,col2=st.columns(2)
col1.write("invest")
col2.write("Gross investment, defined as additions to plant and equipment plus maintenance and repairs in millions of dollars deflated by the implicit price deflator of producers’ durable equipment (base 1947)")
col1,col2=st.columns(2)
col1.write("value")
col2.write("Market value of the firm, defined as the price of common shares at December 31 (or, for WH, IBM and CH, the average price of December 31 and January 31 of the following year) times the number of common shares outstanding plus price of preferred shares at December 31 (or average price of December 31 and January 31 of the following year) times number of preferred shares plus total book value of debt at December 31 in millions of dollars deflated by the implicit GNP price deflator (base 1947)")
col1,col2=st.columns(2)
col1.write("capital")
col2.write("Stock of plant and equipment, defined as the accumulated sum of net additions to plant and equipment deflated by the implicit price deflator for producers’ durable equipment (base 1947) minus depreciation allowance deflated by depreciation expense deflator (10 years moving average of wholesale price index of metals and metal products, base1947)")
col1,col2=st.columns(2)
col1.write("firm")
col2.write("General Motors (GM), US Steel (US), General Electric (GE), Chrysler (CH), Atlantic Refining (AR), IBM, Union Oil (UO), Westinghouse (WH), Goodyear (GY), Diamond Match (DM), American Steel (AS)")
col1,col2=st.columns(2)
col1.write("year")
col2.write("Year ranging from 1935 to 1954")
st.markdown("")
# Show raw data & data info
df_summary = fc.data_summary(df)
if st.checkbox("Show raw data", value = False, key = st.session_state['key']):
st.write(df)
#st.info("Data shape: "+ str(n_rows) + " rows and " + str(n_cols) + " columns")
st.write("Data shape: ", n_rows, " rows and ", n_cols, " columns")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl=st.checkbox("Show duplicates and NAs info", value = False, key = st.session_state['key'])
if check_nasAnddupl:
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0])
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(np.where(df.isnull())[0])))))
# Show variable info
if st.checkbox('Show variable info', value = False, key = st.session_state['key']):
st.write(df_summary["Variable types"])
# Show summary statistics (raw data)
if st.checkbox('Show summary statistics (raw data)', value = False, key = st.session_state['key']):
st.write(df_summary["ALL"].style.set_precision(user_precision))
# Download link for summary statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_summary["Variable types"].to_excel(excel_file, sheet_name="variable_info")
df_summary["ALL"].to_excel(excel_file, sheet_name="summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Summary statistics__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
dev_expander_anovPre = st.expander("ANOVA for raw panel data", expanded = False)
with dev_expander_anovPre:
if df.shape[1] > 2:
# Target variable
target_var = st.selectbox('Select target variable ', df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if df[target_var].dtypes == "int64" or df[target_var].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var, axis = 1).columns)]
clas_var = st.selectbox('Select classifier variable ', [entity, time], key = st.session_state['key'])
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by entity
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var] = df[target_var]
grouped_boxchart_ent = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size1, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(entity, scale = alt.Scale(zero = False)),
y = alt.Y(target_var, scale = alt.Scale(zero = False)),
tooltip = [target_var, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_ent, use_container_width=True)
with col10:
st.write("Boxplot grouped by time:")
box_size2 = st.slider("Select box size ", 1, 50, 5, key = st.session_state['key'])
# Grouped boxplot by time
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var] = df[target_var]
grouped_boxchart_time = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size2, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(time, scale = alt.Scale(domain = [min(df[time]), max(df[time])])),
y = alt.Y(target_var, scale = alt.Scale(zero = False)),
tooltip = [target_var, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_time, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_boxplot")))
st.write("")
# Count for entity and time
col5, col6 = st.columns(2)
with col5:
st.write("Number of observations per entity:")
counts_ent = pd.DataFrame(df_grouped_ent.count()[target_var])
counts_ent.columns = ["count"]
st.write(counts_ent.transpose())
with col6:
st.write("Number of observations per time:")
counts_time = pd.DataFrame(df_grouped_time.count()[target_var])
counts_time.columns = ["count"]
st.write(counts_time.transpose())
if sett_hints:
st.info(str(fc.learning_hints("de_anova_count")))
st.write("")
# ANOVA calculation
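                        # One-way ANOVA decomposition used below:
                        #   SS_between = sum_g n_g * (mean_g - overall_mean)^2, df_between = G - 1
                        #   SS_within  = sum_g (n_g - 1) * var_g,               df_within  = N - G
                        #   F = (SS_between / df_between) / (SS_within / df_within)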
df_grouped = df[[target_var,clas_var]].groupby(clas_var)
overall_mean = (df_grouped.mean()*df_grouped.count()).sum()/df_grouped.count().sum()
dof_between = len(df_grouped.count())-1
dof_within = df_grouped.count().sum()-len(df_grouped.count())
dof_tot = dof_between + dof_within
SS_between = (((df_grouped.mean()-overall_mean)**2)*df_grouped.count()).sum()
SS_within = (df_grouped.var()*(df_grouped.count()-1)).sum()
SS_total = SS_between + SS_within
MS_between = SS_between/dof_between
MS_within = SS_within/dof_within
F_stat = MS_between/MS_within
p_value = scipy.stats.f.sf(F_stat, dof_between, dof_within)
anova_table=pd.DataFrame({
"DF": [dof_between, dof_within.values[0], dof_tot.values[0]],
"SS": [SS_between.values[0], SS_within.values[0], SS_total.values[0]],
"MS": [MS_between.values[0], MS_within.values[0], ""],
"F-statistic": [F_stat.values[0], "", ""],
"p-value": [p_value[0], "", ""]},
index = ["Between", "Within", "Total"],)
st.write("ANOVA:")
st.write(anova_table)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_table")))
st.write("")
#Anova (OLS)
codes = pd.factorize(df[clas_var])[0]
ano_ols = sm.OLS(df[target_var], sm.add_constant(codes))
ano_ols_output = ano_ols.fit()
residuals = ano_ols_output.resid
col7, col8 = st.columns(2)
with col7:
# QQ-plot
st.write("Normal QQ-plot:")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data[entity] = df[entity]
qq_plot_data[time] = df[time]
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 300).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", entity, time, "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
with col8:
# Residuals histogram
st.write("Residuals histogram:")
residuals_hist = pd.DataFrame(residuals)
residuals_hist.columns = ["residuals"]
binNo_res = st.slider("Select maximum number of bins ", 5, 100, 25, key = st.session_state['key'])
hist_plot_res = alt.Chart(residuals_hist, height = 300).mark_bar().encode(
x = alt.X("residuals", title = "residuals", bin = alt.BinParams(maxbins = binNo_res), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip("residuals", bin = alt.BinParams(maxbins = binNo_res))]
)
st.altair_chart(hist_plot_res, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_residuals")))
# Download link for ANOVA statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_grouped_ent.mean()[target_var].to_excel(excel_file, sheet_name="entity_mean")
df_grouped_ent.std()[target_var].to_excel(excel_file, sheet_name="entity_sd")
df_grouped_time.mean()[target_var].to_excel(excel_file, sheet_name="time_mean")
df_grouped_time.std()[target_var].to_excel(excel_file, sheet_name="time_sd")
counts_ent.transpose().to_excel(excel_file, sheet_name="entity_obs")
counts_time.transpose().to_excel(excel_file, sheet_name="time_obs")
anova_table.to_excel(excel_file, sheet_name="ANOVA table")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "ANOVA statistics__" + target_var + "__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download ANOVA statistics</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.error("ERROR: The target variable must be a numerical one!")
else: st.error("ERROR: No variables available for ANOVA!")
#++++++++++++++++++++++
# DATA PROCESSING
# Settings for data processing
#-------------------------------------
dev_expander_dm_sb = st.expander("Specify data processing preferences", expanded = False)
with dev_expander_dm_sb:
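        # np.where(df.isnull())[0] gives the row position of every NA cell; taking the unique
        # positions and indexing with iloc counts the rows that contain at least one NA.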
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
n_rows_wNAs_pre_processing = "No"
if n_rows_wNAs > 0:
n_rows_wNAs_pre_processing = "Yes"
a1, a2, a3 = st.columns(3)
else: a1, a3 = st.columns(2)
sb_DM_dImp_num = None
sb_DM_dImp_other = None
sb_DM_delRows=None
sb_DM_keepRows=None
group_by_num = None
group_by_other = None
with a1:
#--------------------------------------------------------------------------------------
# DATA CLEANING
st.markdown("**Data cleaning**")
# Delete rows
delRows =st.selectbox('Delete rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = st.session_state['key'])
if delRows!='-':
if delRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
if (row_1 + 1) < row_2 :
sb_DM_delRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 == row_2 :
st.warning("WARNING: No row is deleted!")
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif delRows=='equal':
sb_DM_delRows = st.multiselect("to...", df.index, key = st.session_state['key'])
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = st.session_state['key'])
if delRows=='greater':
sb_DM_delRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.warning("WARNING: No row is deleted!")
elif delRows=='greater or equal':
sb_DM_delRows=df.index[df.index >= row_1]
if row_1 == 0:
st.error("ERROR: All rows are deleted!")
return
elif delRows=='smaller':
sb_DM_delRows=df.index[df.index < row_1]
if row_1 == 0:
st.warning("WARNING: No row is deleted!")
elif delRows=='smaller or equal':
sb_DM_delRows=df.index[df.index <= row_1]
if row_1 == len(df)-1:
st.error("ERROR: All rows are deleted!")
return
if sb_DM_delRows is not None:
df = df.loc[~df.index.isin(sb_DM_delRows)]
no_delRows=n_rows-df.shape[0]
# Keep rows
keepRows =st.selectbox('Keep rows with index ...', options=['-', 'greater', 'greater or equal', 'smaller', 'smaller or equal', 'equal', 'between'], key = st.session_state['key'])
if keepRows!='-':
if keepRows=='between':
row_1=st.number_input('Lower limit is', value=0, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
row_2=st.number_input('Upper limit is', value=2, step=1, min_value= 0, max_value=len(df)-1, key = st.session_state['key'])
if (row_1 + 1) < row_2 :
sb_DM_keepRows=df.index[(df.index > row_1) & (df.index < row_2)]
elif (row_1 + 1) == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 == row_2 :
st.error("ERROR: No row is kept!")
return
elif row_1 > row_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif keepRows=='equal':
sb_DM_keepRows = st.multiselect("to...", df.index, key = st.session_state['key'])
else:
row_1=st.number_input('than...', step=1, value=1, min_value = 0, max_value=len(df)-1, key = st.session_state['key'])
if keepRows=='greater':
sb_DM_keepRows=df.index[df.index > row_1]
if row_1 == len(df)-1:
st.error("ERROR: No row is kept!")
return
elif keepRows=='greater or equal':
sb_DM_keepRows=df.index[df.index >= row_1]
if row_1 == 0:
st.warning("WARNING: All rows are kept!")
elif keepRows=='smaller':
sb_DM_keepRows=df.index[df.index < row_1]
if row_1 == 0:
st.error("ERROR: No row is kept!")
return
elif keepRows=='smaller or equal':
sb_DM_keepRows=df.index[df.index <= row_1]
if sb_DM_keepRows is not None:
df = df.loc[df.index.isin(sb_DM_keepRows)]
no_keptRows=df.shape[0]
# Delete columns
sb_DM_delCols = st.multiselect("Select columns to delete", df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
df = df.loc[:,~df.columns.isin(sb_DM_delCols)]
# Keep columns
sb_DM_keepCols = st.multiselect("Select columns to keep", df.drop([entity, time], axis = 1).columns, key = st.session_state['key'])
if len(sb_DM_keepCols) > 0:
df = df.loc[:,df.columns.isin([entity, time] + sb_DM_keepCols)]
# Delete duplicates if any exist
if df[df.duplicated()].shape[0] > 0:
sb_DM_delDup = st.selectbox("Delete duplicate rows", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_delDup == "Yes":
n_rows_dup = df[df.duplicated()].shape[0]
df = df.drop_duplicates()
elif df[df.duplicated()].shape[0] == 0:
sb_DM_delDup = "No"
# Delete rows with NA if any exist
n_rows_wNAs = df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0]
if n_rows_wNAs > 0:
sb_DM_delRows_wNA = st.selectbox("Delete rows with NAs", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_delRows_wNA == "Yes":
df = df.dropna()
elif n_rows_wNAs == 0:
sb_DM_delRows_wNA = "No"
# Filter data
st.markdown("**Data filtering**")
filter_var = st.selectbox('Filter your data by a variable...', list('-')+ list(df.columns), key = st.session_state['key'])
if filter_var !='-':
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if df[filter_var].dtypes=="float64":
filter_format="%.8f"
else:
filter_format=None
user_filter=st.selectbox('Select values that are ...', options=['greater','greater or equal','smaller','smaller or equal', 'equal','between'], key = st.session_state['key'])
if user_filter=='between':
filter_1=st.number_input('Lower limit is', format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
filter_2=st.number_input('Upper limit is', format=filter_format, value=df[filter_var].max(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
#reclassify values:
if filter_1 < filter_2 :
df = df[(df[filter_var] > filter_1) & (df[filter_var] < filter_2)]
if len(df) == 0:
st.error("ERROR: No data available for the selected limits!")
return
elif filter_1 >= filter_2 :
st.error("ERROR: Lower limit must be smaller than upper limit!")
return
elif user_filter=='equal':
filter_1=st.multiselect('to... ', options=df[filter_var].values, key = st.session_state['key'])
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
else:
filter_1=st.number_input('than... ',format=filter_format, value=df[filter_var].min(), min_value=df[filter_var].min(), max_value=df[filter_var].max(), key = st.session_state['key'])
#reclassify values:
if user_filter=='greater':
df = df[df[filter_var] > filter_1]
elif user_filter=='greater or equal':
df = df[df[filter_var] >= filter_1]
elif user_filter=='smaller':
df= df[df[filter_var]< filter_1]
elif user_filter=='smaller or equal':
df = df[df[filter_var] <= filter_1]
if len(df) == 0:
st.error("ERROR: No data available for the selected value!")
return
elif len(df) == n_rows:
st.warning("WARNING: Data are not filtered for this value!")
else:
filter_1=st.multiselect('Filter your data by a value...', (df[filter_var]).unique(), key = st.session_state['key'])
if len(filter_1)>0:
df = df.loc[df[filter_var].isin(filter_1)]
if n_rows_wNAs_pre_processing == "Yes":
with a2:
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
# Select data imputation method (only if rows with NA not deleted)
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.markdown("**Data imputation**")
sb_DM_dImp_choice = st.selectbox("Replace entries with NA", ["No", "Yes"], key = st.session_state['key'])
if sb_DM_dImp_choice == "Yes":
# Numeric variables
sb_DM_dImp_num = st.selectbox("Imputation method for numeric variables", ["Mean", "Median", "Random value"], key = st.session_state['key'])
# Other variables
sb_DM_dImp_other = st.selectbox("Imputation method for other variables", ["Mode", "Random value"], key = st.session_state['key'])
group_by_num = st.selectbox("Group imputation by", ["None", "Entity", "Time"], key = st.session_state['key'])
group_by_other = group_by_num
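                        # Note: fc.data_impute_panel is assumed to fill NAs in numeric columns with the
                        # selected method (mean/median/random value) and in other columns with mode/random,
                        # optionally grouped by entity or time (assumption about the fc helper).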
df = fc.data_impute_panel(df, sb_DM_dImp_num, sb_DM_dImp_other, group_by_num, group_by_other, entity, time)
else:
st.markdown("**Data imputation**")
st.write("")
st.info("No NAs in data set!")
with a3:
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
st.markdown("**Data transformation**")
# Select columns for different transformation types
transform_options = df.drop([entity, time], axis = 1).select_dtypes([np.number]).columns
numCat_options = df.drop([entity, time], axis = 1).columns
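            # The fc.var_transform_* helpers are assumed to append new, prefixed columns
            # (e.g. log_x, sqrt_x, square_x, cent_x, stand_x, norm_x, numCat_x) rather than
            # overwrite the originals; the prediction section below relies on these prefixes.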
sb_DM_dTrans_log = st.multiselect("Select columns to transform with log", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_log is not None:
df = fc.var_transform_log(df, sb_DM_dTrans_log)
sb_DM_dTrans_sqrt = st.multiselect("Select columns to transform with sqrt", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_sqrt is not None:
df = fc.var_transform_sqrt(df, sb_DM_dTrans_sqrt)
sb_DM_dTrans_square = st.multiselect("Select columns for squaring", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_square is not None:
df = fc.var_transform_square(df, sb_DM_dTrans_square)
sb_DM_dTrans_cent = st.multiselect("Select columns for centering ", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_cent is not None:
df = fc.var_transform_cent(df, sb_DM_dTrans_cent)
sb_DM_dTrans_stand = st.multiselect("Select columns for standardization", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_stand is not None:
df = fc.var_transform_stand(df, sb_DM_dTrans_stand)
sb_DM_dTrans_norm = st.multiselect("Select columns for normalization", transform_options, key = st.session_state['key'])
if sb_DM_dTrans_norm is not None:
df = fc.var_transform_norm(df, sb_DM_dTrans_norm)
sb_DM_dTrans_numCat = st.multiselect("Select columns for numeric categorization ", numCat_options, key = st.session_state['key'])
if sb_DM_dTrans_numCat:
if not df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist():
sb_DM_dTrans_numCat_sel = st.multiselect("Select variables for manual categorization ", sb_DM_dTrans_numCat, key = st.session_state['key'])
if sb_DM_dTrans_numCat_sel:
for var in sb_DM_dTrans_numCat_sel:
if df[var].unique().size > 5:
st.error("ERROR: Selected variable has too many categories (>5): " + str(var))
return
else:
manual_cats = pd.DataFrame(index = range(0, df[var].unique().size), columns=["Value", "Cat"])
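                                # Manual categorization: each unique value of the variable is mapped to a
                                # user-chosen integer category via the manual_cats lookup table, e.g. the
                                # (hypothetical) values [1.5, 2.0, 9.9] could be mapped to categories [0, 0, 1];
                                # the new column numCat_<var> is then built row by row from this mapping.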
text = "Category for "
# Save manually selected categories
for i in range(0, df[var].unique().size):
text1 = text + str(var) + ": " + str(sorted(df[var].unique())[i])
man_cat = st.number_input(text1, value = 0, min_value=0, key = st.session_state['key'])
manual_cats.loc[i]["Value"] = sorted(df[var].unique())[i]
manual_cats.loc[i]["Cat"] = man_cat
new_var_name = "numCat_" + var
new_var = pd.DataFrame(index = df.index, columns = [new_var_name])
for c in df[var].index:
if pd.isnull(df[var][c]) == True:
new_var.loc[c, new_var_name] = np.nan
elif pd.isnull(df[var][c]) == False:
new_var.loc[c, new_var_name] = int(manual_cats[manual_cats["Value"] == df[var][c]]["Cat"])
df[new_var_name] = new_var.astype('int64')
# Exclude columns with manual categorization from standard categorization
numCat_wo_manCat = [var for var in sb_DM_dTrans_numCat if var not in sb_DM_dTrans_numCat_sel]
df = fc.var_transform_numCat(df, numCat_wo_manCat)
else:
df = fc.var_transform_numCat(df, sb_DM_dTrans_numCat)
else:
col_with_na = df[sb_DM_dTrans_numCat].columns[df[sb_DM_dTrans_numCat].isna().any()].tolist()
st.error("ERROR: Please select columns without NAs: " + ', '.join(map(str,col_with_na)))
return
else:
sb_DM_dTrans_numCat = None
sb_DM_dTrans_mult = st.number_input("Number of variable multiplications ", value = 0, min_value=0, key = st.session_state['key'])
if sb_DM_dTrans_mult != 0:
multiplication_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_mult), columns=["Var1", "Var2"])
text = "Multiplication pair"
for i in range(0, sb_DM_dTrans_mult):
text1 = text + " " + str(i+1)
text2 = text + " " + str(i+1) + " "
mult_var1 = st.selectbox(text1, transform_options, key = st.session_state['key'])
mult_var2 = st.selectbox(text2, transform_options, key = st.session_state['key'])
multiplication_pairs.loc[i]["Var1"] = mult_var1
multiplication_pairs.loc[i]["Var2"] = mult_var2
                    df = fc.var_transform_mult(df, mult_var1, mult_var2)
sb_DM_dTrans_div = st.number_input("Number of variable divisions ", value = 0, min_value=0, key = st.session_state['key'])
if sb_DM_dTrans_div != 0:
division_pairs = pd.DataFrame(index = range(0, sb_DM_dTrans_div), columns=["Var1", "Var2"])
text = "Division pair"
for i in range(0, sb_DM_dTrans_div):
text1 = text + " " + str(i+1) + " (numerator)"
text2 = text + " " + str(i+1) + " (denominator)"
div_var1 = st.selectbox(text1, transform_options, key = st.session_state['key'])
div_var2 = st.selectbox(text2, transform_options, key = st.session_state['key'])
division_pairs.loc[i]["Var1"] = div_var1
division_pairs.loc[i]["Var2"] = div_var2
                    df = fc.var_transform_div(df, div_var1, div_var2)
data_transform=st.checkbox("Transform data in Excel?", value=False)
if data_transform==True:
st.info("Press the button to open your data in Excel. Don't forget to save your result as a csv or a txt file!")
# Download link
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="data",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Data_transformation__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Transform your data in Excel</a>
""",
unsafe_allow_html=True)
st.write("")
#--------------------------------------------------------------------------------------
# PROCESSING SUMMARY
if st.checkbox('Show a summary of my data processing preferences', value = False, key = st.session_state['key']):
st.markdown("Summary of data changes:")
#--------------------------------------------------------------------------------------
# DATA CLEANING
# Rows
if sb_DM_delRows is not None and delRows!='-' :
if no_delRows > 1:
st.write("-", no_delRows, " rows were deleted!")
elif no_delRows == 1:
st.write("-",no_delRows, " row was deleted!")
elif no_delRows == 0:
st.write("- No row was deleted!")
else:
st.write("- No row was deleted!")
if sb_DM_keepRows is not None and keepRows!='-' :
if no_keptRows > 1:
st.write("-", no_keptRows, " rows are kept!")
elif no_keptRows == 1:
st.write("-",no_keptRows, " row is kept!")
elif no_keptRows == 0:
st.write("- All rows are kept!")
else:
st.write("- All rows are kept!")
# Columns
if len(sb_DM_delCols) > 1:
st.write("-", len(sb_DM_delCols), " columns were manually deleted:", ', '.join(sb_DM_delCols))
elif len(sb_DM_delCols) == 1:
st.write("-",len(sb_DM_delCols), " column was manually deleted:", str(sb_DM_delCols[0]))
elif len(sb_DM_delCols) == 0:
st.write("- No column was manually deleted!")
if len(sb_DM_keepCols) > 1:
st.write("-", len(sb_DM_keepCols), " columns are kept:", ', '.join(sb_DM_keepCols))
elif len(sb_DM_keepCols) == 1:
st.write("-",len(sb_DM_keepCols), " column is kept:", str(sb_DM_keepCols[0]))
elif len(sb_DM_keepCols) == 0:
st.write("- All columns are kept!")
# Duplicates
if sb_DM_delDup == "Yes":
if n_rows_dup > 1:
st.write("-", n_rows_dup, " duplicate rows were deleted!")
elif n_rows_dup == 1:
st.write("-", n_rows_dup, "duplicate row was deleted!")
else:
st.write("- No duplicate row was deleted!")
# NAs
if sb_DM_delRows_wNA == "Yes":
if n_rows_wNAs > 1:
st.write("-", n_rows_wNAs, "rows with NAs were deleted!")
elif n_rows_wNAs == 1:
                    st.write("-", n_rows_wNAs, "row with NAs was deleted!")
else:
st.write("- No row with NAs was deleted!")
# Filter
if filter_var != "-":
if df[filter_var].dtypes=="int64" or df[filter_var].dtypes=="float64":
if isinstance(filter_1, list):
if len(filter_1) == 0:
st.write("-", " Data was not filtered!")
elif len(filter_1) > 0:
st.write("-", " Data filtered by:", str(filter_var))
elif filter_1 is not None:
st.write("-", " Data filtered by:", str(filter_var))
else:
st.write("-", " Data was not filtered!")
elif len(filter_1)>0:
st.write("-", " Data filtered by:", str(filter_var))
elif len(filter_1) == 0:
st.write("-", " Data was not filtered!")
else:
st.write("-", " Data was not filtered!")
#--------------------------------------------------------------------------------------
# DATA IMPUTATION
if sb_DM_delRows_wNA == "No" and n_rows_wNAs > 0:
st.write("- Data imputation method for numeric variables:", sb_DM_dImp_num)
st.write("- Data imputation method for other variable types:", sb_DM_dImp_other)
st.write("- Imputation grouped by:", group_by_num)
#--------------------------------------------------------------------------------------
# DATA TRANSFORMATION
# log
if len(sb_DM_dTrans_log) > 1:
st.write("-", len(sb_DM_dTrans_log), " columns were log-transformed:", ', '.join(sb_DM_dTrans_log))
elif len(sb_DM_dTrans_log) == 1:
st.write("-",len(sb_DM_dTrans_log), " column was log-transformed:", sb_DM_dTrans_log[0])
elif len(sb_DM_dTrans_log) == 0:
st.write("- No column was log-transformed!")
# sqrt
if len(sb_DM_dTrans_sqrt) > 1:
st.write("-", len(sb_DM_dTrans_sqrt), " columns were sqrt-transformed:", ', '.join(sb_DM_dTrans_sqrt))
elif len(sb_DM_dTrans_sqrt) == 1:
st.write("-",len(sb_DM_dTrans_sqrt), " column was sqrt-transformed:", sb_DM_dTrans_sqrt[0])
elif len(sb_DM_dTrans_sqrt) == 0:
st.write("- No column was sqrt-transformed!")
# square
if len(sb_DM_dTrans_square) > 1:
st.write("-", len(sb_DM_dTrans_square), " columns were squared:", ', '.join(sb_DM_dTrans_square))
elif len(sb_DM_dTrans_square) == 1:
st.write("-",len(sb_DM_dTrans_square), " column was squared:", sb_DM_dTrans_square[0])
elif len(sb_DM_dTrans_square) == 0:
st.write("- No column was squared!")
# centering
if len(sb_DM_dTrans_cent) > 1:
st.write("-", len(sb_DM_dTrans_cent), " columns were centered:", ', '.join(sb_DM_dTrans_cent))
elif len(sb_DM_dTrans_cent) == 1:
st.write("-",len(sb_DM_dTrans_cent), " column was centered:", sb_DM_dTrans_cent[0])
elif len(sb_DM_dTrans_cent) == 0:
st.write("- No column was centered!")
# standardize
if len(sb_DM_dTrans_stand) > 1:
st.write("-", len(sb_DM_dTrans_stand), " columns were standardized:", ', '.join(sb_DM_dTrans_stand))
elif len(sb_DM_dTrans_stand) == 1:
st.write("-",len(sb_DM_dTrans_stand), " column was standardized:", sb_DM_dTrans_stand[0])
elif len(sb_DM_dTrans_stand) == 0:
st.write("- No column was standardized!")
# normalize
if len(sb_DM_dTrans_norm) > 1:
st.write("-", len(sb_DM_dTrans_norm), " columns were normalized:", ', '.join(sb_DM_dTrans_norm))
elif len(sb_DM_dTrans_norm) == 1:
st.write("-",len(sb_DM_dTrans_norm), " column was normalized:", sb_DM_dTrans_norm[0])
elif len(sb_DM_dTrans_norm) == 0:
st.write("- No column was normalized!")
# numeric category
if sb_DM_dTrans_numCat is not None:
if len(sb_DM_dTrans_numCat) > 1:
st.write("-", len(sb_DM_dTrans_numCat), " columns were transformed to numeric categories:", ', '.join(sb_DM_dTrans_numCat))
elif len(sb_DM_dTrans_numCat) == 1:
st.write("-",len(sb_DM_dTrans_numCat), " column was transformed to numeric categories:", sb_DM_dTrans_numCat[0])
elif sb_DM_dTrans_numCat is None:
st.write("- No column was transformed to numeric categories!")
# multiplication
if sb_DM_dTrans_mult != 0:
st.write("-", "Number of variable multiplications: ", sb_DM_dTrans_mult)
elif sb_DM_dTrans_mult == 0:
st.write("- No variables were multiplied!")
# division
if sb_DM_dTrans_div != 0:
st.write("-", "Number of variable divisions: ", sb_DM_dTrans_div)
elif sb_DM_dTrans_div == 0:
st.write("- No variables were divided!")
st.write("")
st.write("")
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# UPDATED DATA SUMMARY
# Show only if changes were made
if any(v for v in [sb_DM_delCols, sb_DM_dImp_num, sb_DM_dImp_other, sb_DM_dTrans_log, sb_DM_dTrans_sqrt, sb_DM_dTrans_square, sb_DM_dTrans_cent, sb_DM_dTrans_stand, sb_DM_dTrans_norm, sb_DM_dTrans_numCat ] if v is not None) or sb_DM_delDup == "Yes" or sb_DM_delRows_wNA == "Yes" or sb_DM_dTrans_mult != 0 or sb_DM_dTrans_div != 0 or filter_var != "-" or delRows!='-' or keepRows!='-' or len(sb_DM_keepCols) > 0:
dev_expander_dsPost = st.expander("Explore cleaned and transformed panel data info and stats", expanded = False)
with dev_expander_dsPost:
if df.shape[1] > 2 and df.shape[0] > 0:
# Show cleaned and transformed data & data info
df_summary_post = fc.data_summary(df)
if st.checkbox("Show cleaned and transformed data", value = False):
n_rows_post = df.shape[0]
n_cols_post = df.shape[1]
st.dataframe(df)
st.write("Data shape: ", n_rows_post, "rows and ", n_cols_post, "columns")
# Download transformed data:
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="Clean. and transf. data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "CleanedTransfData__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned and transformed data</a>
""",
unsafe_allow_html=True)
st.write("")
if df[df.duplicated()].shape[0] > 0 or df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
check_nasAnddupl2 = st.checkbox("Show duplicates and NAs info (processed)", value = False)
if check_nasAnddupl2:
index_c = []
for c in df.columns:
for r in df.index:
if pd.isnull(df[c][r]):
index_c.append(r)
if df[df.duplicated()].shape[0] > 0:
st.write("Number of duplicates: ", df[df.duplicated()].shape[0])
st.write("Duplicate row index: ", ', '.join(map(str,list(df.index[df.duplicated()]))))
if df.iloc[list(pd.unique(np.where(df.isnull())[0]))].shape[0] > 0:
st.write("Number of rows with NAs: ", len(pd.unique(sorted(index_c))))
st.write("Rows with NAs: ", ', '.join(map(str,list(pd.unique(sorted(index_c))))))
# Show cleaned and transformed variable info
if st.checkbox("Show cleaned and transformed variable info", value = False):
st.write(df_summary_post["Variable types"])
# Show summary statistics (cleaned and transformed data)
if st.checkbox('Show summary statistics (cleaned and transformed data)', value = False):
st.write(df_summary_post["ALL"].style.set_precision(user_precision))
# Download link for cleaned data statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="cleaned_data")
df_summary_post["Variable types"].to_excel(excel_file, sheet_name="cleaned_variable_info")
df_summary_post["ALL"].to_excel(excel_file, sheet_name="cleaned_summary_statistics")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned data summary statistics_panel_" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned data summary statistics</a>
""",
unsafe_allow_html=True)
st.write("")
if fc.get_mode(df).loc["n_unique"].any():
st.caption("** Mode is not unique.")
if sett_hints:
st.info(str(fc.learning_hints("de_summary_statistics")))
else:
st.error("ERROR: No data available for preprocessing!")
return
dev_expander_anovPost = st.expander("ANOVA for cleaned and transformed panel data", expanded = False)
with dev_expander_anovPost:
if df.shape[1] > 2 and df.shape[0] > 0:
# Target variable
target_var2 = st.selectbox('Select target variable', df.drop([entity, time], axis = 1).columns)
if df[target_var2].dtypes == "int64" or df[target_var2].dtypes == "float64":
class_var_options = df.columns
class_var_options = class_var_options[class_var_options.isin(df.drop(target_var2, axis = 1).columns)]
clas_var2 = st.selectbox('Select classifier variable', [entity, time],)
# Means and sd by entity
col1, col2 = st.columns(2)
with col1:
df_anova_woTime = df.drop([time], axis = 1)
df_grouped_ent = df_anova_woTime.groupby(entity)
st.write("Mean based on entity:")
st.write(df_grouped_ent.mean()[target_var2])
st.write("")
with col2:
st.write("SD based on entity:")
st.write(df_grouped_ent.std()[target_var2])
st.write("")
# Means and sd by time
col3, col4 = st.columns(2)
with col3:
df_anova_woEnt= df.drop([entity], axis = 1)
df_grouped_time = df_anova_woEnt.groupby(time)
counts_time = pd.DataFrame(df_grouped_time.count()[target_var2])
counts_time.columns = ["count"]
st.write("Mean based on time:")
st.write(df_grouped_time.mean()[target_var2])
st.write("")
with col4:
st.write("SD based on time:")
st.write(df_grouped_time.std()[target_var2])
st.write("")
col9, col10 = st.columns(2)
with col9:
st.write("Boxplot grouped by entity:")
box_size1 = st.slider("Select box size ", 1, 50, 5)
# Grouped boxplot by entity
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var2] = df[target_var2]
grouped_boxchart_ent = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size1, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(entity, scale = alt.Scale(zero = False)),
y = alt.Y(target_var2, scale = alt.Scale(zero = False)),
tooltip = [target_var2, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_ent, use_container_width=True)
with col10:
st.write("Boxplot grouped by time:")
box_size2 = st.slider("Select box size ", 1, 50, 5)
# Grouped boxplot by time
grouped_boxplot_data = pd.DataFrame()
grouped_boxplot_data[time] = df[time]
grouped_boxplot_data[entity] = df[entity]
grouped_boxplot_data["Index"] = df.index
grouped_boxplot_data[target_var2] = df[target_var2]
grouped_boxchart_time = alt.Chart(grouped_boxplot_data, height = 300).mark_boxplot(size = box_size2, color = "#1f77b4", median = dict(color = "darkred")).encode(
x = alt.X(time, scale = alt.Scale(domain = [min(df[time]), max(df[time])])),
y = alt.Y(target_var2, scale = alt.Scale(zero = False)),
tooltip = [target_var2, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(grouped_boxchart_time, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_boxplot")))
st.write("")
# Count for entity and time
col5, col6 = st.columns(2)
with col5:
st.write("Number of observations per entity:")
counts_ent = pd.DataFrame(df_grouped_ent.count()[target_var2])
counts_ent.columns = ["count"]
st.write(counts_ent.transpose())
with col6:
st.write("Number of observations per time:")
counts_time = pd.DataFrame(df_grouped_time.count()[target_var2])
counts_time.columns = ["count"]
st.write(counts_time.transpose())
if sett_hints:
st.info(str(fc.learning_hints("de_anova_count")))
st.write("")
# ANOVA calculation
df_grouped = df[[target_var2,clas_var2]].groupby(clas_var2)
overall_mean = (df_grouped.mean()*df_grouped.count()).sum()/df_grouped.count().sum()
dof_between = len(df_grouped.count())-1
dof_within = df_grouped.count().sum()-len(df_grouped.count())
dof_tot = dof_between + dof_within
SS_between = (((df_grouped.mean()-overall_mean)**2)*df_grouped.count()).sum()
SS_within = (df_grouped.var()*(df_grouped.count()-1)).sum()
SS_total = SS_between + SS_within
MS_between = SS_between/dof_between
MS_within = SS_within/dof_within
F_stat = MS_between/MS_within
p_value = scipy.stats.f.sf(F_stat, dof_between, dof_within)
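                # Illustrative (hypothetical) example of the calculation above with two groups
                # A = [1, 2, 3] and B = [2, 4, 6]: grand mean = 3, SS_between = 3*(2-3)^2 + 3*(4-3)^2 = 6
                # (df = 1), SS_within = 2*1 + 2*4 = 10 (df = 4), F = (6/1)/(10/4) = 2.4,
                # p = scipy.stats.f.sf(2.4, 1, 4) is approx. 0.20.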
anova_table=pd.DataFrame({
"DF": [dof_between, dof_within.values[0], dof_tot.values[0]],
"SS": [SS_between.values[0], SS_within.values[0], SS_total.values[0]],
"MS": [MS_between.values[0], MS_within.values[0], ""],
"F-statistic": [F_stat.values[0], "", ""],
"p-value": [p_value[0], "", ""]},
index = ["Between", "Within", "Total"],)
st.write("ANOVA:")
st.write(anova_table)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_table")))
st.write("")
#Anova (OLS)
codes = pd.factorize(df[clas_var2])[0]
ano_ols = sm.OLS(df[target_var2], sm.add_constant(codes))
ano_ols_output = ano_ols.fit()
residuals = ano_ols_output.resid
col7, col8 = st.columns(2)
with col7:
# QQ-plot
st.write("Normal QQ-plot:")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
st.write("")
qq_plot_data = pd.DataFrame()
qq_plot_data["StandResiduals"] = (residuals - residuals.mean())/residuals.std()
qq_plot_data["Index"] = df.index
qq_plot_data[entity] = df[entity]
qq_plot_data[time] = df[time]
qq_plot_data = qq_plot_data.sort_values(by = ["StandResiduals"])
qq_plot_data["Theoretical quantiles"] = stats.probplot(residuals, dist="norm")[0][0]
qq_plot = alt.Chart(qq_plot_data, height = 300).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("StandResiduals", title = "stand. residuals", scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["StandResiduals", "Theoretical quantiles", entity, time, "Index"]
)
line = alt.Chart(
pd.DataFrame({"Theoretical quantiles": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])], "StandResiduals": [min(qq_plot_data["Theoretical quantiles"]), max(qq_plot_data["Theoretical quantiles"])]})).mark_line(size = 2, color = "darkred").encode(
alt.X("Theoretical quantiles"),
alt.Y("StandResiduals"),
)
st.altair_chart(qq_plot + line, use_container_width = True)
with col8:
# Residuals histogram
st.write("Residuals histogram:")
residuals_hist = pd.DataFrame(residuals)
residuals_hist.columns = ["residuals"]
binNo_res2 = st.slider("Select maximum number of bins ", 5, 100, 25)
hist_plot = alt.Chart(residuals_hist, height = 300).mark_bar().encode(
x = alt.X("residuals", title = "residuals", bin = alt.BinParams(maxbins = binNo_res2), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip("residuals", bin = alt.BinParams(maxbins = binNo_res2))]
)
st.altair_chart(hist_plot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("de_anova_residuals")))
# Download link for ANOVA statistics
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_grouped_ent.mean()[target_var2].to_excel(excel_file, sheet_name="entity_mean")
df_grouped_ent.std()[target_var2].to_excel(excel_file, sheet_name="entity_sd")
df_grouped_time.mean()[target_var2].to_excel(excel_file, sheet_name="time_mean")
df_grouped_time.std()[target_var2].to_excel(excel_file, sheet_name="time_sd")
counts_ent.transpose().to_excel(excel_file, sheet_name="entity_obs")
counts_time.transpose().to_excel(excel_file, sheet_name="time_obs")
anova_table.to_excel(excel_file, sheet_name="ANOVA table")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Cleaned ANOVA statistics__" + target_var2 + "__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download cleaned ANOVA statistics</a>
""",
unsafe_allow_html=True)
st.write("")
else:
st.error("ERROR: The target variable must be a numerical one!")
else:
st.error("ERROR: No data available for ANOVA!")
return
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++
# DATA VISUALIZATION
data_visualization_container = st.container()
with data_visualization_container:
#st.write("")
st.write("")
st.write("")
st.header("**Data visualization**")
dev_expander_dv = st.expander("Explore visualization types", expanded = False)
with dev_expander_dv:
if df.shape[1] > 2 and df.shape[0] > 0:
st.write('**Variable selection**')
varl_sel_options = df.columns
varl_sel_options = varl_sel_options[varl_sel_options.isin(df.drop([entity, time], axis = 1).columns)]
var_sel = st.selectbox('Select variable for visualizations', varl_sel_options, key = st.session_state['key'])
if df[var_sel].dtypes == "float64" or df[var_sel].dtypes == "float32" or df[var_sel].dtypes == "int64" or df[var_sel].dtypes == "int32":
a4, a5 = st.columns(2)
with a4:
st.write('**Scatterplot with LOESS line**')
yy_options = df.columns
yy_options = yy_options[yy_options.isin(df.drop([entity, time], axis = 1).columns)]
yy = st.selectbox('Select variable for y-axis', yy_options, key = st.session_state['key'])
if df[yy].dtypes == "float64" or df[yy].dtypes == "float32" or df[yy].dtypes == "int64" or df[yy].dtypes == "int32":
fig_data = pd.DataFrame()
fig_data[yy] = df[yy]
fig_data[var_sel] = df[var_sel]
fig_data["Index"] = df.index
fig_data[entity] = df[entity]
fig_data[time] = df[time]
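                            # The scatter layer is overlaid with a LOESS smoother computed by
                            # Vega-Lite via transform_loess(var_sel, yy) on the same data.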
fig = alt.Chart(fig_data).mark_circle().encode(
x = alt.X(var_sel, scale = alt.Scale(domain = [min(fig_data[var_sel]), max(fig_data[var_sel])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(yy, scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [yy, var_sel, entity, time, "Index"]
)
st.altair_chart(fig + fig.transform_loess(var_sel, yy).mark_line(size = 2, color = "darkred"), use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_scatterplot")))
else: st.error("ERROR: Please select a numeric variable for the y-axis!")
with a5:
st.write('**Histogram**')
binNo = st.slider("Select maximum number of bins", 5, 100, 25, key = st.session_state['key'])
fig2 = alt.Chart(df).mark_bar().encode(
x = alt.X(var_sel, title = var_sel + " (binned)", bin = alt.BinParams(maxbins = binNo), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y("count()", title = "count of records", axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = ["count()", alt.Tooltip(var_sel, bin = alt.BinParams(maxbins = binNo))]
)
st.altair_chart(fig2, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_histogram")))
a6, a7 = st.columns(2)
with a6:
st.write('**Boxplot**')
# Boxplot
boxplot_data = pd.DataFrame()
boxplot_data[var_sel] = df[var_sel]
boxplot_data["Index"] = df.index
boxplot_data[entity] = df[entity]
boxplot_data[time] = df[time]
boxplot = alt.Chart(boxplot_data).mark_boxplot(size = 100, color = "#1f77b4", median = dict(color = "darkred")).encode(
y = alt.Y(var_sel, scale = alt.Scale(zero = False)),
tooltip = [var_sel, entity, time, "Index"]
).configure_axis(
labelFontSize = 11,
titleFontSize = 12
)
st.altair_chart(boxplot, use_container_width=True)
if sett_hints:
st.info(str(fc.learning_hints("dv_boxplot")))
with a7:
st.write("**QQ-plot**")
var_values = df[var_sel]
qqplot_data = pd.DataFrame()
qqplot_data[var_sel] = var_values
qqplot_data["Index"] = df.index
qqplot_data[entity] = df[entity]
qqplot_data[time] = df[time]
qqplot_data = qqplot_data.sort_values(by = [var_sel])
qqplot_data["Theoretical quantiles"] = stats.probplot(var_values, dist="norm")[0][0]
qqplot = alt.Chart(qqplot_data).mark_circle(size=20).encode(
x = alt.X("Theoretical quantiles", title = "theoretical quantiles", scale = alt.Scale(domain = [min(qqplot_data["Theoretical quantiles"]), max(qqplot_data["Theoretical quantiles"])]), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
y = alt.Y(var_sel, title = str(var_sel), scale = alt.Scale(zero = False), axis = alt.Axis(titleFontSize = 12, labelFontSize = 11)),
tooltip = [var_sel, "Theoretical quantiles", entity, time, "Index"]
)
st.altair_chart(qqplot + qqplot.transform_regression('Theoretical quantiles', var_sel).mark_line(size = 2, color = "darkred"), use_container_width = True)
if sett_hints:
st.info(str(fc.learning_hints("dv_qqplot")))
else: st.error("ERROR: Please select a numeric variable!")
else: st.error("ERROR: No data available for Data Visualization!")
# Check again after processing
if np.where(df[entity].isnull())[0].size > 0:
entity_na_warn = "WARNING: The variable selected for entity has NAs!"
    else: entity_na_warn = False
if np.where(df[time].isnull())[0].size > 0:
time_na_warn = "WARNING: The variable selected for time has NAs!"
    else: time_na_warn = False
#------------------------------------------------------------------------------------------
#++++++++++++++++++++++++++++++++++++++++++++
# PANEL DATA MODELLING
data_modelling_container = st.container()
with data_modelling_container:
#st.write("")
#st.write("")
#st.write("")
st.write("")
st.write("")
st.header("**Panel data modelling**")
        st.markdown("Create predictive models of your panel data using panel data modelling! STATY will take care of the modelling for you, so you can focus on interpreting and communicating the results!")
PDM_settings = st.expander("Specify model", expanded = False)
with PDM_settings:
if time_na_warn == False and entity_na_warn == False:
# Initial status for running models
model_full_results = None
do_modval = "No"
model_val_results = None
model_full_results = None
panel_model_fit = None
if df.shape[1] > 2 and df.shape[0] > 0:
#--------------------------------------------------------------------------------------
# GENERAL SETTINGS
st.markdown("**Variable selection**")
# Variable categories
df_summary_model = fc.data_summary(df)
var_cat = df_summary_model["Variable types"].loc["category"]
# Response variable
response_var_options = df.columns
response_var_options = response_var_options[response_var_options.isin(df.drop(entity, axis = 1).columns)]
if time != "NA":
response_var_options = response_var_options[response_var_options.isin(df.drop(time, axis = 1).columns)]
response_var = st.selectbox("Select response variable", response_var_options, key = st.session_state['key'])
# Check if response variable is numeric and has no NAs
response_var_message_num = False
response_var_message_na = False
response_var_message_cat = False
if var_cat.loc[response_var] == "string/binary" or var_cat.loc[response_var] == "bool/binary":
response_var_message_num = "ERROR: Please select a numeric response variable!"
elif var_cat.loc[response_var] == "string/categorical" or var_cat.loc[response_var] == "other" or var_cat.loc[response_var] == "string/single":
response_var_message_num = "ERROR: Please select a numeric response variable!"
elif var_cat.loc[response_var] == "categorical":
response_var_message_cat = "WARNING: Categorical variable is treated as continuous variable!"
if response_var_message_num != False:
st.error(response_var_message_num)
if response_var_message_na != False:
st.error(response_var_message_na)
if response_var_message_cat != False:
st.warning(response_var_message_cat)
# Continue if everything is clean for response variable
if response_var_message_num == False and response_var_message_na == False:
# Select explanatory variables
expl_var_options = response_var_options[response_var_options.isin(df.drop(response_var, axis = 1).columns)]
expl_var = st.multiselect("Select explanatory variables", expl_var_options, key = st.session_state['key'])
var_list = list([entity]) + list([time]) + list([response_var]) + list(expl_var)
# Check if explanatory variables are numeric
expl_var_message_num = False
expl_var_message_na = False
                    if any(a for a in df[expl_var].dtypes if a != "float64" and a != "float32" and a != "int64" and a != "int32"):
expl_var_not_num = df[expl_var].select_dtypes(exclude=["int64", "int32", "float64", "float32"]).columns
expl_var_message_num = "ERROR: Please exclude non-numeric variables: " + ', '.join(map(str,list(expl_var_not_num)))
# Check if NAs are present and delete them automatically (delete before run models button)
if np.where(df[var_list].isnull())[0].size > 0:
st.warning("WARNING: Your modelling data set includes NAs. Rows with NAs are automatically deleted!")
if expl_var_message_num != False:
st.error(expl_var_message_num)
elif expl_var_message_na != False:
st.error(expl_var_message_na)
# Continue if everything is clean for explanatory variables and at least one was selected
elif expl_var_message_num == False and expl_var_message_na == False and len(expl_var) > 0:
#--------------------------------------------------------------------------------------
# ALGORITHMS
st.markdown("**Specify modelling algorithm**")
# Algorithms selection
col1, col2 = st.columns(2)
algorithms = ["Entity Fixed Effects", "Time Fixed Effects", "Two-ways Fixed Effects", "Random Effects", "Pooled"]
with col1:
PDM_alg = st.selectbox("Select modelling technique", algorithms)
# Covariance type
with col2:
PDM_cov_type = st.selectbox("Select covariance type", ["homoskedastic", "heteroskedastic", "clustered"])
PDM_cov_type2 = None
if PDM_cov_type == "clustered":
PDM_cov_type2 = st.selectbox("Select cluster type", ["entity", "time", "both"])
#--------------------------------------------------------------------------------------
# VALIDATION SETTINGS
st.markdown("**Validation settings**")
do_modval= st.selectbox("Use model validation", ["No", "Yes"])
if do_modval == "Yes":
col1, col2 = st.columns(2)
# Select training/ test ratio
with col1:
train_frac = st.slider("Select training data size", 0.5, 0.95, 0.8)
# Select number for validation runs
with col2:
val_runs = st.slider("Select number for validation runs", 5, 100, 10)
#--------------------------------------------------------------------------------------
# PREDICTION SETTINGS
st.markdown("**Model predictions**")
do_modprednew = st.selectbox("Use model prediction for new data", ["No", "Yes"])
if do_modprednew == "No":
df_new = pd.DataFrame()
if do_modprednew == "Yes":
# Upload new data
new_data_pred = st.file_uploader(" ", type=["csv", "txt"])
if new_data_pred is not None:
# Read data
if uploaded_data is not None:
df_new = pd.read_csv(new_data_pred, decimal=dec_sep, sep = col_sep,thousands=thousands_sep,encoding=encoding_val, engine='python')
else:
df_new = pd.read_csv(new_data_pred, sep = ";|,|\t",engine='python')
st.success('Loading data... done!')
# Transform columns if any were transformed
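                                # The transformations chosen for the modelling data are re-applied to the new
                                # data, reusing the original data's statistics (mean/std, min/max, category
                                # mapping) so that model inputs stay on a consistent scale.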
# Log-transformation
if sb_DM_dTrans_log is not None:
# List of log-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_log:
if "log_"+tv in expl_var:
tv_list.append(tv)
# Check if log-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for log-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_log(df_new, tv_list)
# Sqrt-transformation
if sb_DM_dTrans_sqrt is not None:
# List of sqrt-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_sqrt:
if "sqrt_"+tv in expl_var:
tv_list.append(tv)
# Check if sqrt-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for sqrt-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_sqrt(df_new, tv_list)
# Square-transformation
if sb_DM_dTrans_square is not None:
# List of square-transformed variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_square:
if "square_"+tv in expl_var:
tv_list.append(tv)
# Check if square-transformed explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for square-transformation in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
df_new = fc.var_transform_square(df_new, tv_list)
# Standardization
if sb_DM_dTrans_stand is not None:
# List of standardized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_stand:
if "stand_"+tv in expl_var:
tv_list.append(tv)
# Check if standardized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for standardization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use mean and standard deviation of original data for standardization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if df[tv].std() != 0:
new_var_name = "stand_" + tv
new_var = (df_new[tv] - df[tv].mean())/df[tv].std()
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be standardized!")
return
# Normalization
if sb_DM_dTrans_norm is not None:
# List of normalized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_norm:
if "norm_"+tv in expl_var:
tv_list.append(tv)
# Check if normalized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for normalization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use min and max of original data for normalization
for tv in tv_list:
if df_new[tv].dtypes == "float64" or df_new[tv].dtypes == "int64" or df_new[tv].dtypes == "float32" or df_new[tv].dtypes == "int32":
if (df[tv].max()-df[tv].min()) != 0:
new_var_name = "norm_" + tv
new_var = (df_new[tv] - df[tv].min())/(df[tv].max()-df[tv].min())
df_new[new_var_name] = new_var
else:
st.error("ERROR: " + str(tv) + " is not numerical and cannot be normalized!")
return
# Categorization
if sb_DM_dTrans_numCat is not None:
# List of categorized variables that are included as explanatory variables
tv_list = []
for tv in sb_DM_dTrans_numCat:
if "numCat_"+tv in expl_var:
tv_list.append(tv)
# Check if categorized explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for categorization in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
# Use same categories as for original data
for tv in tv_list:
new_var_name = "numCat_" + tv
new_var = pd.DataFrame(index = df_new.index, columns = [new_var_name])
for r in df_new.index:
if df.loc[df[tv] == df_new[tv][r]].empty == False:
new_var.loc[r, new_var_name] = df["numCat_" + tv][df.loc[df[tv] == df_new[tv][r]].index[0]]
else:
st.error("ERROR: Category is missing for the value in row: "+ str(r) + ", variable: " + str(tv))
return
df_new[new_var_name] = new_var.astype('int64')
# Multiplication
if sb_DM_dTrans_mult != 0:
# List of multiplied variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_mult):
mult_name = "mult_" + str(multiplication_pairs.loc[tv]["Var1"]) + "_" + str(multiplication_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(multiplication_pairs.loc[tv]["Var1"]))
tv_list.append(str(multiplication_pairs.loc[tv]["Var2"]))
# Check if multiplied explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for multiplication in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_mult):
df_new = fc.var_transform_mult(df_new, multiplication_pairs.loc[var]["Var1"], multiplication_pairs.loc[var]["Var2"])
# Division
if sb_DM_dTrans_div != 0:
# List of divided variables that are included as explanatory variables
tv_list = []
for tv in range(0, sb_DM_dTrans_div):
mult_name = "div_" + str(division_pairs.loc[tv]["Var1"]) + "_" + str(division_pairs.loc[tv]["Var2"])
if mult_name in expl_var:
tv_list.append(str(division_pairs.loc[tv]["Var1"]))
tv_list.append(str(division_pairs.loc[tv]["Var2"]))
                                    # Check if divided explanatory variables are available for transformation in new data columns
tv_list_not_avail = []
if tv_list:
for tv in tv_list:
if tv not in df_new.columns:
tv_list_not_avail.append(tv)
if tv_list_not_avail:
st.error("ERROR: Some variables are not available for division in new data: "+ ', '.join(tv_list_not_avail))
return
else:
# Transform data if variables for transformation are all available in new data
for var in range(0, sb_DM_dTrans_div):
df_new = fc.var_transform_div(df_new, division_pairs.loc[var]["Var1"], division_pairs.loc[var]["Var2"])
# Check if explanatory variables are available as columns as well as entity and time
expl_list = []
for expl_incl in expl_var:
if expl_incl not in df_new.columns:
expl_list.append(expl_incl)
if expl_list:
st.error("ERROR: Some variables are missing in new data: "+ ', '.join(expl_list))
return
if any(a for a in df_new.columns if a == entity) and any(a for a in df_new.columns if a == time):
st.info("All variables are available for predictions!")
elif any(a for a in df_new.columns if a == entity) == False:
st.error("ERROR: Entity variable is missing!")
return
elif any(a for a in df_new.columns if a == time) == False:
st.error("ERROR: Time variable is missing!")
return
# Check if NAs are present
if df_new.iloc[list(pd.unique(np.where(df_new.isnull())[0]))].shape[0] == 0:
st.empty()
else:
df_new = df_new[list([entity]) + list([time]) + expl_var].dropna()
st.warning("WARNING: Your new data set includes NAs. Rows with NAs are automatically deleted!")
df_new = df_new[list([entity]) + list([time]) + expl_var]
# Modelling data set
df = df[var_list]
# Check if NAs are present and delete them automatically
if np.where(df[var_list].isnull())[0].size > 0:
df = df.dropna()
#--------------------------------------------------------------------------------------
# SETTINGS SUMMARY
st.write("")
# Show modelling data
if st.checkbox("Show modelling data"):
st.write(df)
st.write("Data shape: ", df.shape[0], " rows and ", df.shape[1], " columns")
# Download link for modelling data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df.to_excel(excel_file, sheet_name="modelling_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "Modelling data__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download modelling data</a>
""",
unsafe_allow_html=True)
st.write("")
# Show prediction data
if do_modprednew == "Yes":
if new_data_pred is not None:
if st.checkbox("Show new data for predictions"):
st.write(df_new)
st.write("Data shape: ", df_new.shape[0], " rows and ", df_new.shape[1], " columns")
# Download link for forecast data
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
df_new.to_excel(excel_file, sheet_name="new_data")
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name= "New data for predictions__" + df_name + ".xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download new data for predictions</a>
""",
unsafe_allow_html=True)
st.write("")
# Show modelling settings
if st.checkbox('Show a summary of modelling settings', value = False):
#--------------------------------------------------------------------------------------
                            # ALGORITHMS
st.write("Algorithms summary:")
st.write("- ",PDM_alg)
st.write("- Covariance type: ", PDM_cov_type)
if PDM_cov_type2 is not None:
st.write("- Cluster type: ", PDM_cov_type2)
st.write("")
#--------------------------------------------------------------------------------------
# SETTINGS
# General settings summary
st.write("General settings summary:")
# Modelling formula
if expl_var != False:
st.write("- Modelling formula:", response_var, "~", ' + '.join(expl_var))
st.write("- Entity:", entity)
st.write("- Time:", time)
if do_modval == "Yes":
# Train/ test ratio
if train_frac != False:
st.write("- Train/ test ratio:", str(round(train_frac*100)), "% / ", str(round(100-train_frac*100)), "%")
# Validation runs
if val_runs != False:
st.write("- Validation runs:", str(val_runs))
st.write("")
st.write("")
#--------------------------------------------------------------------------------------
# RUN MODELS
# Models are run on button click
st.write("")
run_models = st.button("Run model")
st.write("")
# Run everything on button click
if run_models:
# Check if new data available
if do_modprednew == "Yes":
if new_data_pred is None:
st.error("ERROR: Please upload new data for additional model predictions or select 'No'!")
return
# Define clustered cov matrix "entity", "time", "both"
cluster_entity = True
cluster_time = False
if PDM_cov_type == "clustered":
if PDM_cov_type2 == "entity":
cluster_entity = True
cluster_time = False
if PDM_cov_type2 == "time":
cluster_entity = False
cluster_time = True
if PDM_cov_type2 == "both":
cluster_entity = True
cluster_time = True
# Prepare data
data = df.set_index([entity, time])
Y_data = data[response_var]
X_data1 = data[expl_var] # for efe, tfe, twfe
X_data2 = sm.add_constant(data[expl_var]) # for re, pool
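                            # linearmodels expects an (entity, time) MultiIndex; a constant is added only for
                            # Random Effects and Pooled OLS since an intercept is not separately identified
                            # once entity/time fixed effects are included.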
# Model validation
if do_modval == "Yes":
# Progress bar
st.info("Validation progress")
my_bar = st.progress(0.0)
progress1 = 0
# Model validation
# R²
model_eval_r2 = pd.DataFrame(index = range(val_runs), columns = [response_var])
# MSE
model_eval_mse = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# RMSE
model_eval_rmse = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# MAE
model_eval_mae = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# MaxERR
model_eval_maxerr = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# EVRS
model_eval_evrs = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# SSR
model_eval_ssr = pd.DataFrame(index = range(val_runs), columns = ["Value"])
# Model validation summary
model_eval_mean = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
model_eval_sd = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
# Collect all residuals in test runs
resdiuals_allruns = {}
for val in range(val_runs):
# Split data into train/ test data
if PDM_alg != "Pooled" and PDM_alg != "Random Effects":
X_data = X_data1.copy()
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data = X_data2.copy()
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, train_size = train_frac, random_state = val)
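                                    # Each validation run draws a random row-wise split (train_size = train_frac,
                                    # seeded with the run index), so the panel structure is not kept intact
                                    # within the train and test subsets.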
# Train selected panel model
# efe
if PDM_alg == "Entity Fixed Effects":
panel_model_efe_val = PanelOLS(Y_train, X_train, entity_effects = True, time_effects = False)
panel_model_fit_efe_val = panel_model_efe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# tfe
if PDM_alg == "Time Fixed Effects":
panel_model_tfe_val = PanelOLS(Y_train, X_train, entity_effects = False, time_effects = True)
panel_model_fit_tfe_val = panel_model_tfe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# twfe
if PDM_alg == "Two-ways Fixed Effects":
panel_model_twfe_val = PanelOLS(Y_train, X_train, entity_effects = True, time_effects = True)
panel_model_fit_twfe_val = panel_model_twfe_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# re
if PDM_alg == "Random Effects":
panel_model_re_val = RandomEffects(Y_train, X_train)
panel_model_fit_re_val = panel_model_re_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# pool
if PDM_alg == "Pooled":
panel_model_pool_val = PooledOLS(Y_train, X_train)
panel_model_fit_pool_val = panel_model_pool_val.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# save selected model
if PDM_alg == "Entity Fixed Effects":
panel_model_fit_val = panel_model_fit_efe_val
if PDM_alg == "Time Fixed Effects":
panel_model_fit_val = panel_model_fit_tfe_val
if PDM_alg == "Two-ways Fixed Effects":
panel_model_fit_val = panel_model_fit_twfe_val
if PDM_alg == "Random Effects":
panel_model_fit_val = panel_model_fit_re_val
if PDM_alg == "Pooled":
panel_model_fit_val = panel_model_fit_pool_val
# Extract effects
if PDM_alg != "Pooled":
comb_effects = panel_model_fit_val.estimated_effects
ent_effects = pd.DataFrame(index = X_train.reset_index()[entity].drop_duplicates(), columns = ["Value"])
time_effects = pd.DataFrame(index = sorted(list(X_train.reset_index()[time].drop_duplicates())), columns = ["Value"])
# Use LSDV for estimating effects
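# (LSDV = least squares dummy variable regression: the model is re-estimated by
# OLS with explicit entity/time dummies so the fixed effects appear as ordinary
# coefficients that can be read off the fitted parameter vector.)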
if PDM_alg == "Entity Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[entity])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = model_mlr_fit_val.params[e]
for t in time_effects.index:
time_effects.loc[t]["Value"] = 0
if PDM_alg == "Time Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[time])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = 0
for t in time_effects.index:
time_effects.loc[t]["Value"] = model_mlr_fit_val.params[t]
if PDM_alg == "Two-ways Fixed Effects":
X_train_mlr = pd.concat([X_train.reset_index(drop = True), pd.get_dummies(X_train.reset_index()[entity]), pd.get_dummies(X_train.reset_index()[time])], axis = 1)
Y_train_mlr = Y_train.reset_index(drop = True)
model_mlr_val = sm.OLS(Y_train_mlr, X_train_mlr)
model_mlr_fit_val = model_mlr_val.fit()
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = model_mlr_fit_val.params[e]
for t in time_effects.index:
time_effects.loc[t]["Value"] = model_mlr_fit_val.params[t]
if PDM_alg == "Random Effects":
for e in ent_effects.index:
ent_effects.loc[e]["Value"] = comb_effects.loc[e,].reset_index(drop = True).iloc[0][0]
# Prediction for Y_test (without including effects)
Y_test_pred = panel_model_fit_val.predict(X_test)
# Add effects for predictions
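# (predict() only returns the regressor contribution X*beta, hence the loop
# below looks up the estimated entity/time effects and adds them back to each
# prediction individually.)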
for p in range(Y_test_pred.size):
entity_ind = Y_test_pred.index[p][0]
time_ind = Y_test_pred.index[p][1]
# if effects are available, add effect
if PDM_alg == "Entity Fixed Effects":
if any(a for a in ent_effects.index if a == entity_ind):
effect = ent_effects.loc[entity_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Time Fixed Effects":
if any(a for a in time_effects.index if a == time_ind):
effect = time_effects.loc[time_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Two-ways Fixed Effects":
if any(a for a in time_effects.index if a == time_ind):
effect_time = time_effects.loc[time_ind][0]
else: effect_time = 0
if any(a for a in ent_effects.index if a == entity_ind):
effect_entity = ent_effects.loc[entity_ind][0]
else: effect_entity = 0
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect_entity + effect_time
if PDM_alg == "Random Effects":
if any(a for a in ent_effects.index if a == entity_ind):
effect = ent_effects.loc[entity_ind][0]
Y_test_pred["predictions"].loc[entity_ind, time_ind] = Y_test_pred["predictions"].loc[entity_ind, time_ind] + effect
# Adjust format
Y_test_pred = Y_test_pred.reset_index()["predictions"]
Y_test = Y_test.reset_index()[response_var]
# Save R² for test data
model_eval_r2.iloc[val][response_var] = r2_score(Y_test, Y_test_pred)
# Save MSE for test data
model_eval_mse.iloc[val]["Value"] = mean_squared_error(Y_test, Y_test_pred, squared = True)
# Save RMSE for test data
model_eval_rmse.iloc[val]["Value"] = mean_squared_error(Y_test, Y_test_pred, squared = False)
# Save MAE for test data
model_eval_mae.iloc[val]["Value"] = mean_absolute_error(Y_test, Y_test_pred)
# Save MaxERR for test data
model_eval_maxerr.iloc[val]["Value"] = max_error(Y_test, Y_test_pred)
# Save explained variance regression score for test data
model_eval_evrs.iloc[val]["Value"] = explained_variance_score(Y_test, Y_test_pred)
# Save sum of squared residuals for test data
model_eval_ssr.iloc[val]["Value"] = ((Y_test-Y_test_pred)**2).sum()
# Save residual values for test data
res = Y_test-Y_test_pred
residuals_allruns[val] = res
progress1 += 1
my_bar.progress(progress1/(val_runs))
# Calculate mean performance statistics
# Mean
model_eval_mean.loc["% VE"]["Value"] = model_eval_r2[response_var].mean()
model_eval_mean.loc["MSE"]["Value"] = model_eval_mse["Value"].mean()
model_eval_mean.loc["RMSE"]["Value"] = model_eval_rmse["Value"].mean()
model_eval_mean.loc["MAE"]["Value"] = model_eval_mae["Value"].mean()
model_eval_mean.loc["MaxErr"]["Value"] = model_eval_maxerr["Value"].mean()
model_eval_mean.loc["EVRS"]["Value"] = model_eval_evrs["Value"].mean()
model_eval_mean.loc["SSR"]["Value"] = model_eval_ssr["Value"].mean()
# Sd
model_eval_sd.loc["% VE"]["Value"] = model_eval_r2[response_var].std()
model_eval_sd.loc["MSE"]["Value"] = model_eval_mse["Value"].std()
model_eval_sd.loc["RMSE"]["Value"] = model_eval_rmse["Value"].std()
model_eval_sd.loc["MAE"]["Value"] = model_eval_mae["Value"].std()
model_eval_sd.loc["MaxErr"]["Value"] = model_eval_maxerr["Value"].std()
model_eval_sd.loc["EVRS"]["Value"] = model_eval_evrs["Value"].std()
model_eval_sd.loc["SSR"]["Value"] = model_eval_ssr["Value"].std()
# Residuals
residuals_collection = pd.DataFrame()
for x in residuals_allruns:
residuals_collection = pd.concat([residuals_collection, pd.DataFrame(residuals_allruns[x])], ignore_index = True)
residuals_collection.columns = [response_var]
# Collect validation results
model_val_results = {}
model_val_results["mean"] = model_eval_mean
model_val_results["sd"] = model_eval_sd
model_val_results["residuals"] = residuals_collection
model_val_results["variance explained"] = model_eval_r2
# Full model
# Progress bar
st.info("Full model progress")
my_bar_fm = st.progress(0.0)
progress2 = 0
# efe
panel_model_efe = PanelOLS(Y_data, X_data1, entity_effects = True, time_effects = False)
panel_model_fit_efe = panel_model_efe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# tfe
panel_model_tfe = PanelOLS(Y_data, X_data1, entity_effects = False, time_effects = True)
panel_model_fit_tfe = panel_model_tfe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# twfe
panel_model_twfe = PanelOLS(Y_data, X_data1, entity_effects = True, time_effects = True)
panel_model_fit_twfe = panel_model_twfe.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# re
panel_model_re = RandomEffects(Y_data, X_data2)
panel_model_fit_re = panel_model_re.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# pool
panel_model_pool = PooledOLS(Y_data, X_data2)
panel_model_fit_pool = panel_model_pool.fit(cov_type = PDM_cov_type, cluster_entity = cluster_entity, cluster_time = cluster_time, debiased = True, auto_df = True)
# save selected model
if PDM_alg == "Entity Fixed Effects":
panel_model_fit = panel_model_fit_efe
if PDM_alg == "Time Fixed Effects":
panel_model_fit = panel_model_fit_tfe
if PDM_alg == "Two-ways Fixed Effects":
panel_model_fit = panel_model_fit_twfe
if PDM_alg == "Random Effects":
panel_model_fit = panel_model_fit_re
if PDM_alg == "Pooled":
panel_model_fit = panel_model_fit_pool
# Entity information
ent_inf = pd.DataFrame(index = ["No. entities", "Avg observations", "Median observations", "Min observations", "Max observations"], columns = ["Value"])
ent_inf.loc["No. entities"] = panel_model_fit.entity_info["total"]
ent_inf.loc["Avg observations"] = panel_model_fit.entity_info["mean"]
ent_inf.loc["Median observations"] = panel_model_fit.entity_info["median"]
ent_inf.loc["Min observations"] = panel_model_fit.entity_info["min"]
ent_inf.loc["Max observations"] = panel_model_fit.entity_info["max"]
# Time information
time_inf = pd.DataFrame(index = ["No. time periods", "Avg observations", "Median observations", "Min observations", "Max observations"], columns = ["Value"])
time_inf.loc["No. time periods"] = panel_model_fit.time_info["total"]
time_inf.loc["Avg observations"] = panel_model_fit.time_info["mean"]
time_inf.loc["Median observations"] = panel_model_fit.time_info["median"]
time_inf.loc["Min observations"] = panel_model_fit.time_info["min"]
time_inf.loc["Max observations"] = panel_model_fit.time_info["max"]
# Regression information
reg_inf = pd.DataFrame(index = ["Dep. variable", "Estimator", "Method", "No. observations", "DF residuals", "DF model", "Covariance type"], columns = ["Value"])
reg_inf.loc["Dep. variable"] = response_var
reg_inf.loc["Estimator"] = panel_model_fit.name
if PDM_alg == "Entity Fixed Effects" or PDM_alg == "Time Fixed Effects" or "Two-ways Fixed":
reg_inf.loc["Method"] = "Within"
if PDM_alg == "Random Effects":
reg_inf.loc["Method"] = "Quasi-demeaned"
if PDM_alg == "Pooled":
reg_inf.loc["Method"] = "Least squares"
reg_inf.loc["No. observations"] = panel_model_fit.nobs
reg_inf.loc["DF residuals"] = panel_model_fit.df_resid
reg_inf.loc["DF model"] = panel_model_fit.df_model
reg_inf.loc["Covariance type"] = panel_model_fit._cov_type
# Regression statistics
fitted = df[response_var]-panel_model_fit.resids.values
obs = df[response_var]
reg_stats = pd.DataFrame(index = ["R²", "R² (between)", "R² (within)", "R² (overall)", "Log-likelihood", "SST", "SST (overall)"], columns = ["Value"])
reg_stats.loc["R²"] = panel_model_fit._r2
reg_stats.loc["R² (between)"] = panel_model_fit._c2b**2
reg_stats.loc["R² (within)"] = panel_model_fit._c2w**2
reg_stats.loc["R² (overall)"] = panel_model_fit._c2o**2
reg_stats.loc["Log-likelihood"] = panel_model_fit._loglik
reg_stats.loc["SST"] = panel_model_fit.total_ss
reg_stats.loc["SST (overall)"] = ((obs-obs.mean())**2).sum()
# Overall performance metrics (with effects)
reg_overall = pd.DataFrame(index = ["% VE", "MSE", "RMSE", "MAE", "MaxErr", "EVRS", "SSR"], columns = ["Value"])
reg_overall.loc["% VE"] = r2_score(obs, fitted)
reg_overall.loc["MSE"] = mean_squared_error(obs, fitted, squared = True)
reg_overall.loc["RMSE"] = mean_squared_error(obs, fitted, squared = False)
reg_overall.loc["MAE"] = mean_absolute_error(obs, fitted)
reg_overall.loc["MaxErr"] = max_error(obs, fitted)
reg_overall.loc["EVRS"] = explained_variance_score(obs, fitted)
reg_overall.loc["SSR"] = ((obs-fitted)**2).sum()
# ANOVA
if PDM_alg == "Pooled":
Y_data_mlr = df[response_var]
X_data_mlr = sm.add_constant(df[expl_var])
full_model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
full_model_fit = full_model_mlr.fit()
reg_anova = pd.DataFrame(index = ["Regression", "Residual", "Total"], columns = ["DF", "SS", "MS", "F-statistic"])
reg_anova.loc["Regression"]["DF"] = full_model_fit.df_model
reg_anova.loc["Regression"]["SS"] = full_model_fit.ess
reg_anova.loc["Regression"]["MS"] = full_model_fit.ess/full_model_fit.df_model
reg_anova.loc["Regression"]["F-statistic"] = full_model_fit.fvalue
reg_anova.loc["Residual"]["DF"] = full_model_fit.df_resid
reg_anova.loc["Residual"]["SS"] = full_model_fit.ssr
reg_anova.loc["Residual"]["MS"] = full_model_fit.ssr/full_model_fit.df_resid
reg_anova.loc["Residual"]["F-statistic"] = ""
reg_anova.loc["Total"]["DF"] = full_model_fit.df_resid + full_model_fit.df_model
reg_anova.loc["Total"]["SS"] = full_model_fit.ssr + full_model_fit.ess
reg_anova.loc["Total"]["MS"] = ""
reg_anova.loc["Total"]["F-statistic"] = ""
# Coefficients
if PDM_alg == "Entity Fixed Effects" or PDM_alg == "Time Fixed Effects" or "Two-ways Fixed Effects":
reg_coef = pd.DataFrame(index = expl_var, columns = ["coeff", "std err", "t-statistic", "p-value", "lower 95%", "upper 95%"])
for c in expl_var:
reg_coef.loc[c]["coeff"] = panel_model_fit.params[expl_var.index(c)]
reg_coef.loc[c]["std err"] = panel_model_fit.std_errors.loc[c]
reg_coef.loc[c]["t-statistic"] = panel_model_fit.tstats.loc[c]
reg_coef.loc[c]["p-value"] = panel_model_fit.pvalues.loc[c]
reg_coef.loc[c]["lower 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["lower"]
reg_coef.loc[c]["upper 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["upper"]
if PDM_alg == "Random Effects" or PDM_alg == "Pooled":
reg_coef = pd.DataFrame(index = ["const"]+ expl_var, columns = ["coeff", "std err", "t-statistic", "p-value", "lower 95%", "upper 95%"])
for c in ["const"] + expl_var:
reg_coef.loc[c]["coeff"] = panel_model_fit.params[(["const"]+ expl_var).index(c)]
reg_coef.loc[c]["std err"] = panel_model_fit.std_errors.loc[c]
reg_coef.loc[c]["t-statistic"] = panel_model_fit.tstats.loc[c]
reg_coef.loc[c]["p-value"] = panel_model_fit.pvalues.loc[c]
reg_coef.loc[c]["lower 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["lower"]
reg_coef.loc[c]["upper 95%"] = panel_model_fit.conf_int(level = 0.95).loc[c]["upper"]
# Effects
reg_ent_effects = pd.DataFrame(index = df[entity].drop_duplicates(), columns = ["Value"])
reg_time_effects = pd.DataFrame(index = sorted(list(df[time].drop_duplicates())), columns = ["Value"])
reg_comb_effects = panel_model_fit.estimated_effects
reg_comb_effects.columns = ["Value"]
# Use LSDV for estimating effects
Y_data_mlr = df[response_var]
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data_mlr = sm.add_constant(df[expl_var])
else: X_data_mlr = df[expl_var]
if PDM_alg == "Entity Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[entity])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = model_mlr_fit.params[e]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = 0
if PDM_alg == "Time Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[time])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = 0
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = model_mlr_fit.params[t]
if PDM_alg == "Two-ways Fixed Effects":
X_data_mlr = pd.concat([X_data_mlr, pd.get_dummies(df[entity]), pd.get_dummies(df[time])], axis = 1)
model_mlr = sm.OLS(Y_data_mlr, X_data_mlr)
model_mlr_fit = model_mlr.fit()
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = model_mlr_fit.params[e]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = model_mlr_fit.params[t]
if PDM_alg == "Random Effects":
for e in reg_ent_effects.index:
reg_ent_effects.loc[e]["Value"] = reg_comb_effects.loc[e,].reset_index(drop = True).iloc[0][0]
for t in reg_time_effects.index:
reg_time_effects.loc[t]["Value"] = 0
# New predictions
if df_new.empty == False:
data_new = df_new.set_index([entity, time])
X_data1_new = data_new[expl_var] # for efe, tfe, twfe
X_data2_new = sm.add_constant(data_new[expl_var]) # for re, pool
if PDM_alg != "Pooled" and PDM_alg != "Random Effects":
X_data_new = X_data1_new.copy()
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
X_data_new = X_data2_new.copy()
# Prediction for new prediction data (without including effects)
Y_pred_new = panel_model_fit.predict(X_data_new)
# Add effects for new predictions
for p in range(Y_pred_new.size):
entity_ind = Y_pred_new.index[p][0]
time_ind = Y_pred_new.index[p][1]
# if effects are available, add effect
if PDM_alg == "Entity Fixed Effects":
if any(a for a in reg_ent_effects.index if a == entity_ind):
effect = reg_ent_effects.loc[entity_ind][0]
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Time Fixed Effects":
if any(a for a in reg_time_effects.index if a == time_ind):
effect = reg_time_effects.loc[time_ind][0]
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect
if PDM_alg == "Two-ways Fixed Effects":
if any(a for a in reg_time_effects.index if a == time_ind):
effect_time = reg_time_effects.loc[time_ind][0]
else: effect_time = 0
if any(a for a in reg_ent_effects.index if a == entity_ind):
effect_entity = reg_ent_effects.loc[entity_ind][0]
else: effect_entity = 0
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect_entity + effect_time
if PDM_alg == "Random Effects":
if any(a for a in reg_ent_effects.index if a == entity_ind):
effect = reg_ent_effects.loc[entity_ind][0]
Y_pred_new["predictions"].loc[entity_ind, time_ind] = Y_pred_new["predictions"].loc[entity_ind, time_ind] + effect
# Variance decomposition
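# (Random-effects variance decomposition: the error variance is split into an
# idiosyncratic component and an entity-specific ("individual") component;
# theta is the per-entity quasi-demeaning weight implied by that split.)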
if PDM_alg == "Random Effects":
reg_var_decomp = pd.DataFrame(index = ["idiosyncratic", "individual"], columns = ["variance", "share"])
reg_theta = pd.DataFrame(index = ["theta"], columns = df[entity].drop_duplicates())
reg_var_decomp.loc["idiosyncratic"]["variance"] = panel_model_fit.variance_decomposition["Residual"]
reg_var_decomp.loc["individual"]["variance"] = panel_model_fit.variance_decomposition["Effects"]
reg_var_decomp.loc["idiosyncratic"]["share"] = panel_model_fit.variance_decomposition["Residual"]/(panel_model_fit.variance_decomposition["Residual"]+panel_model_fit.variance_decomposition["Effects"])
reg_var_decomp.loc["individual"]["share"] = panel_model_fit.variance_decomposition["Effects"]/(panel_model_fit.variance_decomposition["Residual"]+panel_model_fit.variance_decomposition["Effects"])
reg_theta.loc["theta"] = list(panel_model_fit.theta.values)
for j in reg_theta.columns:
reg_theta.loc["theta"][j] = reg_theta.loc["theta"][j][0]
# Statistical tests
if PDM_alg == "Entity Fixed Effects":
if PDM_cov_type == "homoskedastic":
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)", "F-test (poolability)", "Hausman-test"])
else:
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)", "F-test (poolability)"])
else:
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)", "F-test (poolability)"])
if PDM_alg == "Pooled" or PDM_alg == "Random Effects":
reg_test = pd.DataFrame(index = ["test statistic", "p-value", "distribution"], columns = ["F-test (non-robust)", "F-test (robust)"])
reg_test.loc["test statistic"]["F-test (non-robust)"] = panel_model_fit.f_statistic.stat
reg_test.loc["p-value"]["F-test (non-robust)"] = panel_model_fit.f_statistic.pval
reg_test.loc["distribution"]["F-test (non-robust)"] = "F(" + str(panel_model_fit.f_statistic.df) + ", " + str(panel_model_fit.f_statistic.df_denom) + ")"
reg_test.loc["test statistic"]["F-test (robust)"] = panel_model_fit.f_statistic_robust.stat
reg_test.loc["p-value"]["F-test (robust)"] = panel_model_fit.f_statistic_robust.pval
reg_test.loc["distribution"]["F-test (robust)"] = "F(" + str(panel_model_fit.f_statistic_robust.df) + ", " + str(panel_model_fit.f_statistic_robust.df_denom) + ")"
if PDM_alg != "Pooled" and PDM_alg != "Random Effects" :
reg_test.loc["test statistic"]["F-test (poolability)"] = panel_model_fit.f_pooled.stat
reg_test.loc["p-value"]["F-test (poolability)"] = panel_model_fit.f_pooled.pval
reg_test.loc["distribution"]["F-test (poolability)"] = "F(" + str(panel_model_fit.f_pooled.df) + ", " + str(panel_model_fit.f_pooled.df_denom) + ")"
if PDM_alg == "Entity Fixed Effects":
if PDM_cov_type == "homoskedastic":
reg_test.loc["test statistic"]["Hausman-test"] = fc.hausman_test(panel_model_fit, panel_model_fit_re)[0]
reg_test.loc["p-value"]["Hausman-test"] = fc.hausman_test(panel_model_fit, panel_model_fit_re)[2]
reg_test.loc["distribution"]["Hausman-test"] = "Chi²(" + str(fc.hausman_test(panel_model_fit, panel_model_fit_re)[1]) + ")"
# Heteroskedasticity tests
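# (Breusch-Pagan regresses the squared residuals on the explanatory variables;
# the White test additionally uses their squares and, in the "with int." variant,
# their pairwise interactions. Significant statistics indicate heteroskedastic errors.)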
reg_het_test =
|
pd.DataFrame(index = ["test statistic", "p-value"], columns = ["Breusch-Pagan test", "White test (without int.)", "White test (with int.)"])
|
pandas.DataFrame
|
"""
Comparison of Coding Flank Effects and Critical SpacerC1A Mutant
--------------------------------------------------------------------------------
Author: <NAME>
Last Modified: January 7, 2020
License: MIT
Description
--------------------------------------------------------------------------------
This script generates SI Figures 4 and 5, which show the effect of variable
coding flanks on the looping frequency, dwell time, and cutting probability
on the reference V4-57-1 RSS, as well as effects from a C to A mutation at the
first position of the spacer.
Notes
--------------------------------------------------------------------------------
This script is designed to be executed from the `code/figures` directory and
loads the proper CSV files from a relative path.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
import vdj.io
import vdj.viz
vdj.viz.plotting_style()
# Upload data on coding flank-relevant RSSs
cf_muts = ['WT12rss', '12CodC6A', '12SpacC1A', 'V4-55']
loop = pd.read_csv('../../data/compiled_looping_frequency_bootstrap.csv',
comment='#')
loop = loop[(loop['mutant'].isin(cf_muts)) &
(loop['hmgb1']==80) & (loop['salt']=='Mg')]
loop = loop.replace(to_replace='WT12rss', value='V4-57-1 (ref)')
dwell = pd.read_csv('../../data/compiled_dwell_times.csv', comment='#')
dwell = dwell[(dwell['mutant'].isin(cf_muts)) & (dwell['hmgb1']==80) & (dwell['salt']=='Mg')]
dwell = dwell.replace(to_replace='WT12rss', value='V4-57-1 (ref)')
dwell_cut = dwell[dwell['cut']==1].copy()
dwell_unloop = dwell[dwell['cut']==0].copy()
cut_posts = pd.read_csv('../../data/pooled_cutting_probability_posteriors.csv',
comment='#')
cut_posts = cut_posts[(cut_posts['mutant'].isin(cf_muts)) & (cut_posts['hmgb1']==80) & (cut_posts['salt']=='Mg')]
cut_posts = cut_posts.replace(to_replace='WT12rss', value='V4-57-1 (ref)')
# Create dwell ECDFs
dfs = []
for source in [dwell, dwell_cut, dwell_unloop]:
dist_df = []
for g,d in source.groupby('mutant'):
x,y = np.sort(d['dwell_time_min'].values), np.arange(0, len(d), 1) / len(d)
y[-1] = 1
_df = pd.DataFrame()
_df['x'] = x
_df['y'] = y
_df['mutant'] = g
dist_df.append(_df)
dwell_dist =
|
pd.concat(dist_df)
|
pandas.concat
|
#%% Load libs
import geopandas
from pathlib import Path
import pandas as pd
import datetime
#%% Setup paths
INPUT_PATH = Path.cwd() / "input"
ce_legacy_path = INPUT_PATH / "CookEastStrips" / "Field_Plan_Final.shp"
ce_2016_c01_path = INPUT_PATH / "FromIanLeslie_CafGeospatial" / "CE_CW_WGS1984" / "CE_WGS1984_2016_OperationalFieldBoundaries" / "C01" / "C0117001.shp"
ce_2016_c02_path = INPUT_PATH / "FromIanLeslie_CafGeospatial" / "CE_CW_WGS1984" / "CE_WGS1984_2016_OperationalFieldBoundaries" / "C02" / "C0217001.shp"
ce_current_path = INPUT_PATH / "20170206_CafRoughBoundaries" / "CafCookEastArea.shp"
cw_current_path = INPUT_PATH / "FromIanLeslie_CafGeospatial" / "CE_CW_WGS1984" / "CookWestBoundariesWGS1984" / "CookWestBoundariesWGS1984.shp"
#%% Load and clean data
# Cook East Legacy (1999 - 2015)
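# (Pipeline: keep fields 1-20, reproject to WGS84, build an experimental-unit ID
# from field + strip, tag the strips with status/date/treatment metadata, and
# drop the legacy attribute columns.)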
ce_legacy = (geopandas.read_file(ce_legacy_path)
.query("Ind_Field > 0 and Ind_Field < 21")
.to_crs({"init": "epsg:4326"})
.assign(
Exp_Unit_ID = lambda x: x.Field.astype(str) + x.Strip.astype(str),
Status = "Past",
Start_Date = "01/01/1999",
End_Date = "12/31/2015",
Treatment_ID = "ASP")
.drop(
["Strip", "Field", "Crop", "Area", "Perimeter", "Area_ac", "Ind_Field"],
axis = 1))
# Cook East 2016
ce_2016_c01 = (geopandas.read_file(ce_2016_c01_path)
.assign(Exp_Unit_ID = "C01"))
ce_2016_c02 = (geopandas.read_file(ce_2016_c02_path)
.assign(Exp_Unit_ID = "C02"))
ce_2016 = (
|
pd.concat([ce_2016_c01, ce_2016_c02], ignore_index=True)
|
pandas.concat
|
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
plt.rcParams['font.size'] = 6
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
graphs_path = root_path+'/boundary_effect/graph/'
if not os.path.exists(graphs_path):
os.makedirs(graphs_path)
time = pd.read_csv(root_path+'/time_series/MonthlyRunoffWeiRiver.csv')['Time']
time = time.values
time = [datetime.strptime(t,'%Y/%m') for t in time]
time = [t.strftime('%b %Y') for t in time]
print(time)
# CHECK 1: is SSA shift-invariant?
# If yes, any shifted copy of an IMF from a SSA decomposition, similar to a
# shifted copy of the original time series, should be maintained.
# For example, given the sunspot time series x (of length 792) we can
# generate a 1-step advanced copy of the original time series as follows:
# x0=(1:791)
# x1=(2:792) this is a 1-step advanced version of x0
# Obviously, shift-invariancy is preserved between x0 and x1 since
# x0(2:791)=x1(1:790)
# For shift-invariancy to be preserved for SSA, we would observe, for
# example, that the SSA Trend components for x0 (imf1 of x0) and x1 (imf1 of
# x1) should be exact copies of one another, advanced by a single step.
# i.e., x0_imf(2:791,1) should equal x1_imf(1:790,1) if shift-invariancy
# is preserved.
# As shown below for SSA, x0_imf(2:791,1) is basically equal to
# x1_imf(1:790,1) except for a few samples close to the beginning and
# end of x0 and x1. Interestingly, we see a low level of error close to the
# beginning of the time series and a high level of error close to the end of
# the time series, which is of high importance in operational forecasting tasks.
# The errors along the middle range are zero, indicating SSA is
# shift-invariant.
# We argue that the errors close to the boundaries are
# caused by the boundary effect, which is exactly the problem this study is
# designed to solve.
# CHECK 2: The impact of appending data points to a time series and then
# performing SSA, analogous to the case in operational forecasting when new
# data becomes available and an updated forecast is made using the newly
# arrived data.
# Ideally, for forecasting situations, when new data is appended to a time
# series and some preprocessing is performed, it should not have an impact
# on previous measurements of the pre-processed time series.
# For example, if Trend_1:N represents the Trend, which has N total
# measurements and was derived by applying SSA to x_1:N, then we would expect
# that when we perform SSA on x appended with another measurement,
# i.e., x_1:N+1, resulting in Trend_1:N+1, the first 1:N measurements in
# Trend_1:N+1 are equal to Trend_1:N. In other words,
# Trend_1:N+1[1:N]=Trend_1:N[1:N].
# We see that this is not the case. Appending an additional observation to the
# time series results in updated SSA components that are entirely
# different from the original (not yet updated) SSA components.
# Interestingly, we see a high level of error at the boundaries of the time
# series, which is of high importance in operational forecasting tasks.
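# --- Minimal sketch of CHECK 1 (illustration only) ---------------------------
# The helper below assumes a companion file 'x1_dec.csv' holding the SSA
# decomposition of the 1-step advanced copy of the series and a component
# column named 'Trend'; both names are assumptions made for illustration and
# should be adjusted to the actual CSV layout.
def check_shift_invariance(dec_dir):
    # Load the SSA decompositions of x0 (original) and x1 (1-step advanced copy).
    x0_dec = pd.read_csv(dec_dir + '/x0_dec.csv')
    x1_dec = pd.read_csv(dec_dir + '/x1_dec.csv')  # assumed file name
    # Under shift-invariance the Trend of x0 advanced by one step equals the
    # Trend of x1; the pointwise error should be ~0 away from the boundaries and
    # largest near them. CHECK 2 can be sketched analogously by comparing the
    # decompositions of x_1:N and x_1:N+1 over their common support.
    return x0_dec['Trend'].values[1:] - x1_dec['Trend'].values[:-1]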
x0_imf =
|
pd.read_csv(root_path+'/boundary_effect/ssa-decompositions-huaxian/x0_dec.csv')
|
pandas.read_csv
|
from prunedcv import PrunedCV, PrunedGridSearchCV
from sklearn.datasets import fetch_california_housing, load_wine
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMRegressor
import numpy as np
import pandas as pd
import pytest
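# The tests below exercise PrunedCV's early-stopping logic: the first complete
# pass over all folds defines best_splits_list_, and later candidates are pruned
# as soon as their partial scores fall behind those best splits by more than the
# given tolerance (behaviour inferred from the assertions in these tests).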
def test_pruner_prun_yes():
pruner = PrunedCV(4, 0.1)
for i in range(6):
pruner._add_split_value_and_prun(1.0)
pruner._add_split_value_and_prun(10000.0)
assert pruner.prune
def test_pruner_prun_no():
pruner = PrunedCV(4, 0.1)
for i in range(4):
pruner._add_split_value_and_prun(1.0)
for i in range(3):
pruner._add_split_value_and_prun(.6)
assert not pruner.prune
def test_pruner_prun_back():
pruner = PrunedCV(4, 0.1)
for i in range(4):
pruner._add_split_value_and_prun(1.0)
for i in range(2):
pruner._add_split_value_and_prun(10000.0)
for i in range(3):
pruner._add_split_value_and_prun(1.0)
assert not pruner.prune
def test_prun_first_run():
pruner = PrunedCV(4, 0.1)
for i in range(4):
pruner._add_split_value_and_prun(1.0)
assert pruner.best_splits_list_ == [1.0, 1.0, 1.0, 1.0]
def test_prun_first_run_check():
pruner = PrunedCV(4, 0.1)
for i in range(4):
pruner._add_split_value_and_prun(1.0)
assert not pruner.first_run_
def test_prun_folds_int():
with pytest.raises(TypeError):
pruner = PrunedCV(1.1, 0.1)
pruner._add_split_value_and_prun(1)
def test_prun_folds_num():
with pytest.raises(ValueError):
pruner = PrunedCV(1, 0.1)
pruner._add_split_value_and_prun(1)
def test_prun_vals_type():
with pytest.raises(TypeError):
pruner = PrunedCV(4, 0.1)
pruner._add_split_value_and_prun(1)
def test_prun_score_val_constant():
pruner = PrunedCV(4, 0.1)
for i in range(8):
pruner._add_split_value_and_prun(1.0)
assert pruner.cross_val_score_value == 1.0
def test_prun_score_val_dec():
pruner = PrunedCV(4, 0.1)
for i in range(7):
pruner._add_split_value_and_prun(1.0)
pruner._add_split_value_and_prun(.9)
assert pruner.cross_val_score_value < 1.0
def test_prun_score_val_inc():
pruner = PrunedCV(4, 0.1)
for i in range(7):
pruner._add_split_value_and_prun(1.0)
pruner._add_split_value_and_prun(1.1)
assert pruner.cross_val_score_value > 1.0
def test_prun_score_val_best():
pruner = PrunedCV(4, 0.1)
for i in range(7):
pruner._add_split_value_and_prun(1.0)
pruner._add_split_value_and_prun(1.1)
assert sum(pruner.best_splits_list_) / pruner.cv == 1.0
def test_prun_pruned_cv_score():
pruner = PrunedCV(4, 0.1)
for i in range(4):
pruner._add_split_value_and_prun(1.0)
for i in range(2):
pruner._add_split_value_and_prun(2.0)
assert pruner.cross_val_score_value == 2.0
def test_prun_3models():
data = fetch_california_housing()
x = data['data']
y = data['target']
pruner = PrunedCV(cv=8, tolerance=.1)
model1 = LGBMRegressor(max_depth=25)
model2 = LGBMRegressor(max_depth=10)
model3 = LGBMRegressor(max_depth=2)
score1 = pruner.cross_val_score(model1, x, y, shuffle=True, random_state=42)
score2 = pruner.cross_val_score(model2, x, y, shuffle=True, random_state=42)
score3 = pruner.cross_val_score(model3, x, y, shuffle=True, random_state=42)
assert (sum(pruner.best_splits_list_) / pruner.cv == score2) and (score2 < score1) and (score2 < score3)
def test_prun_cv_x():
with pytest.raises(TypeError):
pruner = PrunedCV(cv=4, tolerance=.1)
model = LGBMRegressor()
x = [1, 2, 3]
y = np.array([1, 2, 3])
pruner.cross_val_score(model, x, y)
def test_prun_cv_y():
with pytest.raises(TypeError):
pruner = PrunedCV(cv=4, tolerance=.1)
model = LGBMRegressor()
y = [1, 2, 3]
x = np.array([1, 2, 3])
pruner.cross_val_score(model, x, y)
def test_prun_cv_xy():
with pytest.raises(TypeError):
pruner = PrunedCV(cv=4, tolerance=.1)
model = LGBMRegressor()
y = [1, 2, 3]
x = [1, 2, 3]
pruner.cross_val_score(model, x, y)
def test_prun_cv_x_df():
data = fetch_california_housing()
x =
|
pd.DataFrame(data['data'])
|
pandas.DataFrame
|
import unittest
import os
from collections import defaultdict
from unittest import mock
import warnings
import pandas as pd
import numpy as np
from dataprofiler.profilers import FloatColumn
from dataprofiler.profilers.profiler_options import FloatOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestFloatColumn(unittest.TestCase):
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 0)
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
self.assertEqual(profiler.sum, 0)
self.assertEqual(profiler.mean, 0)
self.assertTrue(profiler.median is np.nan)
self.assertEqual([np.nan], profiler.mode)
self.assertTrue(profiler.variance is np.nan)
self.assertTrue(profiler.skewness is np.nan)
self.assertTrue(profiler.kurtosis is np.nan)
self.assertTrue(profiler.stddev is np.nan)
self.assertIsNone(profiler.histogram_selection)
self.assertEqual(len(profiler.quantiles), 999)
self.assertIsNone(profiler.data_type_ratio)
def test_single_data_variance_case(self):
data = pd.Series([1.5]).apply(str)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 1.0)
self.assertEqual(profiler.mean, 1.5)
self.assertTrue(profiler.variance is np.nan)
data = pd.Series([2.5]).apply(str)
profiler.update(data)
self.assertEqual(profiler.match_count, 2)
self.assertEqual(profiler.mean, 2.0)
self.assertEqual(profiler.variance, 0.5)
def test_profiled_precision(self):
"""
Checks whether the precision for the profiler is correct.
:return:
"""
df_1 = pd.Series([0.4, 0.3, 0.1, 0.1, 0.1]).apply(str)
df_2 = pd.Series([0.11, 0.11, 0.12, 2.11]).apply(str)
df_3 = pd.Series([4.114, 3.161, 2.512, 2.131]).apply(str)
df_mix = pd.Series([4.1, '3.', 2.52, 2.13143]).apply(str)
float_profiler = FloatColumn("Name")
float_profiler.update(df_3)
self.assertEqual(4, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_2)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_1)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_mix)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(6, float_profiler.precision['max'])
# edge cases #
# integer with 0s on right and left side
df_ints = pd.Series(['0013245678', '123456700', '0012345600'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_ints)
self.assertEqual(6, float_profiler.precision['min'])
self.assertEqual(8, float_profiler.precision['max'])
# scientific
df_scientific = pd.Series(['1.23e-3', '2.2344', '1.244e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_scientific)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# plus
df_plus = pd.Series(['+1.3e-3', '+2.244', '+1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_plus)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# minus
df_minus = pd.Series(['-1.3234e-3', '-0.244', '-1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_minus)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# spaces around values
df_spaces = pd.Series([' -1.3234e-3 ', ' -0.244 '])
float_profiler = FloatColumn("Name")
float_profiler.update(df_spaces)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# constant precision
df_constant = pd.Series(['1.34', '+1.23e-4', '00101',
'+100.', '0.234', '-432', '.954',
'+.342', '-123e1', '23.1'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_constant)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(3, float_profiler.precision['max'])
self.assertEqual(3, float_profiler.precision['mean'])
self.assertEqual(10, float_profiler.precision['sample_size'])
self.assertEqual(0, float_profiler.precision['var'])
self.assertEqual(0, float_profiler.precision['std'])
# random precision
df_random = pd.Series(['+ 9', '-.3', '-1e-3', '3.2343', '0',
'1230', '0.33', '4.3', '302.1', '-4.322'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_random)
self.assertEqual(0, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
self.assertEqual(2.4444, float_profiler.precision['mean'])
self.assertEqual(9, float_profiler.precision['sample_size'])
self.assertEqual(2.7778, float_profiler.precision['var'])
self.assertEqual(1.6667, float_profiler.precision['std'])
# Ensure order doesn't change anything
df_random_order = pd.Series(['1230', '0.33', '4.3', '302.1', '-4.322',
'+ 9', '-.3', '-1e-3', '3.2343', '0'])
float_profiler_order = FloatColumn("Name")
float_profiler_order.update(df_random_order)
self.assertDictEqual(
float_profiler.precision, float_profiler_order.precision
)
# check to make sure all formats of precision are correctly predicted
samples = [
# value, min expected precision
['10.01', 4],
['.01', 1],
['0.01', 1],
['-0.01', 1],
['+0.01', 1],
[' +0.013', 2],
[' -1.3234e-3 ', 5],
[' 0012345600 ', 6],
[' 0012345600. ', 8],
[' -0012345600. ', 8],
]
for sample in samples:
df_series = pd.Series([sample[0]])
min_expected_precision = sample[1]
precision = FloatColumn._get_float_precision(df_series)
self.assertEqual(min_expected_precision, precision['min'],
msg='Errored for: {}'.format(sample[0]))
def test_profiled_min(self):
# test with multiple values
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[1:])
self.assertEqual(profiler.min, -4)
profiler.update(df)
self.assertEqual(profiler.min, -5)
profiler.update(pd.Series(['-4']))
self.assertEqual(profiler.min, -5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.min, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 0.0)
def test_profiled_max(self):
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[:-1])
self.assertEqual(profiler.max, 4)
profiler.update(df)
self.assertEqual(profiler.max, 5)
profiler.update(pd.Series(['4']))
self.assertEqual(profiler.max, 5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.max, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 3.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 0.0)
def test_profiled_mode(self):
# disabled mode
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
options = FloatOptions()
options.mode.is_enabled = False
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertListEqual([np.nan], profiler.mode)
# same values
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertListEqual([1], profiler.mode)
# multiple modes
df = pd.Series([1.5, 1.5, 2.5, 2.5, 3.5, 3.5, 4.1, 4.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.5, 2.5, 3.5, 4.1], profiler.mode,
decimal=2)
# with different values
df = pd.Series([1.25, 1.25, 1.25, 1.25, 2.9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.25], profiler.mode, decimal=2)
# with negative values
df = pd.Series([-1.1, 1.9, 1.9, 1.9, 2.1, 2.01, 2.01, 2.01]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.9, 2.01], profiler.mode,
decimal=2)
# all unique values
df = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
# By default, returns 5 of the possible modes
np.testing.assert_array_almost_equal([1, 2, 3, 4, 5],
profiler.mode, decimal=2)
# Edge case where mode appears later in the dataset
df = pd.Series([1, 2, 3, 4, 5, 6.2, 6.2]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([6.2], profiler.mode, decimal=2)
df = pd.Series([2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7.1, 7.1, 7.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([7.1], profiler.mode, decimal=2)
def test_top_k_modes(self):
# Default options
options = FloatOptions()
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertEqual(5, len(profiler.mode))
# Test if top_k_modes is less than the number of modes
options = FloatOptions()
options.mode.top_k_modes = 2
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertEqual(2, len(profiler.mode))
# Test if top_k_mode is greater than the number of modes
options = FloatOptions()
options.mode.top_k_modes = 8
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
# Only 5 possible modes so return 5
self.assertEqual(5, len(profiler.mode))
def test_profiled_median(self):
# disabled median
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
options = FloatOptions()
options.median.is_enabled = False
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertTrue(profiler.median is np.nan)
# same values
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(1, profiler.median)
# median lies between two values (2.5 and 3.5)
df = pd.Series([1.5, 1.5, 2.5, 2.5, 3.5, 3.5, 4.1, 4.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(3, profiler.median, places=2)
# with different values
df = pd.Series([1.25, 1.25, 1.25, 1.25, 2.9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(1.25, profiler.median, places=2)
# with negative values, median lies in between values
df = pd.Series([-1.1, 1.9, 1.9, 1.9, 2.1, 2.1, 2.1, 2.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(2, profiler.median, places=2)
# all unique values
df = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(5, profiler.median, places=2)
def test_profiled_mean_and_variance(self):
"""
Checks the mean and variance of profiled numerical columns.
:return:
"""
def mean(df):
total = 0
for item in df:
total += item
return total / len(df)
def var(df):
var = 0
mean_df = mean(df)
for item in df:
var += (item - mean_df) ** 2
return var / (len(df) - 1)
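# batch_variance combines the variances of two batches with the standard pooled
# ("parallel") variance update: merge the two sums of squared deviations and add
# a correction term based on the difference of the batch means.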
def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):
delta = mean_b - mean_a
m_a = var_a * (count_a - 1)
m_b = var_b * (count_b - 1)
M2 = m_a + m_b + delta ** 2 * count_a * count_b / (
count_a + count_b)
return M2 / (count_a + count_b - 1)
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
num_profiler = FloatColumn(df1.name)
num_profiler.update(df1.apply(str))
self.assertEqual(mean(df1), num_profiler.mean)
self.assertEqual(var(df1), num_profiler.variance)
self.assertEqual(np.sqrt(var(df1)), num_profiler.stddev)
variance = batch_variance(
mean_a=num_profiler.mean, var_a=num_profiler.variance,
count_a=num_profiler.match_count,
mean_b=mean(df2), var_b=var(df2), count_b=df2.count()
)
num_profiler.update(df2.apply(str))
df = pd.concat([df1, df2])
self.assertEqual(mean(df), num_profiler.mean)
self.assertEqual(variance, num_profiler.variance)
self.assertEqual(np.sqrt(variance), num_profiler.stddev)
variance = batch_variance(
mean_a=num_profiler.mean, var_a=num_profiler.variance,
count_a=num_profiler.match_count,
mean_b=mean(df3), var_b=var(df3), count_b=df3.count()
)
num_profiler.update(df3.apply(str))
df = pd.concat([df1, df2, df3])
self.assertEqual(mean(df), num_profiler.mean)
self.assertEqual(variance, num_profiler.variance)
self.assertEqual(np.sqrt(variance), num_profiler.stddev)
def test_profiled_skewness(self):
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
num_profiler = FloatColumn(df1.name)
num_profiler.update(df1.apply(str))
self.assertEqual(0, num_profiler.skewness)
num_profiler.update(df2.apply(str))
self.assertAlmostEqual(np.sqrt(22 * 21) / 20 * 133 / 750, num_profiler.skewness)
num_profiler.update(df3.apply(str))
self.assertAlmostEqual(-0.3109967, num_profiler.skewness)
def test_profiled_kurtosis(self):
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
num_profiler = FloatColumn(df1.name)
num_profiler.update(df1.apply(str))
self.assertAlmostEqual(-6 / 5, num_profiler.kurtosis)
num_profiler.update(df2.apply(str))
self.assertAlmostEqual(-0.390358, num_profiler.kurtosis)
num_profiler.update(df3.apply(str))
self.assertAlmostEqual(0.3311739, num_profiler.kurtosis)
def test_bias_correction_option(self):
# df1 = [-5, -4, ..., 3, 4, 5]
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
# df2 = [-3, -2.5, -2, ..., 1.5, 2]
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
# df3 = [1, 1, ... , 1] (ten '1's)
data = np.full((10,), 1)
df3 = pd.Series(data)
# Disable bias correction
options = FloatOptions(); options.bias_correction.is_enabled = False
num_profiler = FloatColumn(df1.name, options=options)
num_profiler.update(df1.apply(str))
# Test biased values of variance, skewness, kurtosis
self.assertAlmostEqual(10, num_profiler.variance)
self.assertAlmostEqual(0, num_profiler.skewness)
self.assertAlmostEqual(89/50 - 3, num_profiler.kurtosis)
df2_ints = df2[df2 == df2.round()]
num_profiler.update(df2.apply(str))
df = pd.concat([df1, df2_ints])
self.assertAlmostEqual(6.3125, num_profiler.variance)
self.assertAlmostEqual(0.17733336, num_profiler.skewness)
self.assertAlmostEqual(-0.56798353, num_profiler.kurtosis)
df3_ints = df3[df3 == df3.round()]
num_profiler.update(df3.apply(str))
df = pd.concat([df1, df2_ints, df3_ints])
self.assertAlmostEqual(4.6755371, num_profiler.variance)
self.assertAlmostEqual(-0.29622465, num_profiler.skewness)
self.assertAlmostEqual(0.099825352, num_profiler.kurtosis)
def test_bias_correction_merge(self):
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
# Disable bias correction
options = FloatOptions();
options.bias_correction.is_enabled = False
num_profiler1 = FloatColumn(df1.name, options=options)
num_profiler1.update(df1.apply(str))
self.assertAlmostEqual(10, num_profiler1.variance)
self.assertAlmostEqual(0, num_profiler1.skewness)
self.assertAlmostEqual(89 / 50 - 3, num_profiler1.kurtosis)
num_profiler2 = FloatColumn(df2.name)
num_profiler2.update(df2.apply(str))
num_profiler = num_profiler1 + num_profiler2
self.assertFalse(num_profiler.bias_correction)
self.assertAlmostEqual(6.3125, num_profiler.variance)
self.assertAlmostEqual(0.17733336, num_profiler.skewness)
self.assertAlmostEqual(-0.56798353, num_profiler.kurtosis)
num_profiler3 = FloatColumn(df3.name)
num_profiler3.update(df3.apply(str))
num_profiler = num_profiler1 + num_profiler2 + num_profiler3
self.assertFalse(num_profiler.bias_correction)
self.assertAlmostEqual(4.6755371, num_profiler.variance)
self.assertAlmostEqual(-0.29622465, num_profiler.skewness)
self.assertAlmostEqual(0.099825352, num_profiler.kurtosis)
def test_null_values_for_histogram(self):
data = pd.Series(['-inf', 'inf'])
profiler = FloatColumn(data.name)
profiler.update(data)
profile = profiler.profile
histogram = profile['histogram']
self.assertEqual(histogram['bin_counts'], None)
self.assertEqual(histogram['bin_edges'], None)
data = pd.Series(['-2', '-1', '1', '2', '-inf', 'inf'])
profiler = FloatColumn(data.name)
profiler.update(data)
profile = profiler.profile
histogram = profile['histogram']
expected_histogram = {
'bin_counts': np.array([1, 1, 0, 2]),
'bin_edges': np.array([-2., -1., 0., 1., 2.]),
}
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(expected_histogram['bin_edges'],
histogram['bin_edges'])
def test_profiled_histogram(self):
"""
Checks the histogram of profiled numerical columns.
:return:
"""
list_data_test = []
# this data has 4 bins, range of 3
# with equal bin size, each bin has the width of 0.75
df1 = pd.Series(["1.0", "2.0", "3.0", "4.0"])
expected_histogram1 = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([1.0, 1.75, 2.5, 3.25, 4.0]),
}
list_data_test.append([df1, expected_histogram1])
# this data has 4 bins, range of 12
# with equal bin size, each bin has the width of 3.0
df2 = pd.Series(["1.0", "5.0", "8.0", "13.0"])
expected_histogram2 = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([1.0, 4.0, 7.0, 10.0, 13.0]),
}
list_data_test.append([df2, expected_histogram2])
# this data has 3 bins, range of 3
# with equal bin size, each bin has the width of 1
df3 = pd.Series(["1.0", "1.0", "3.0", "4.0"])
expected_histogram3 = {
'bin_counts': np.array([2, 0, 1, 1]),
'bin_edges': np.array([1.0, 1.75, 2.5, 3.25, 4.0]),
}
list_data_test.append([df3, expected_histogram3])
# this data has only one unique value, not overflow
df4 = pd.Series([-10.0, -10.0, -10.0]).apply(str)
expected_histogram4 = {
'bin_counts': np.array([3]),
'bin_edges': np.array([-10.0, -10.0]),
}
list_data_test.append([df4, expected_histogram4])
# this data has only one unique value, overflow
df5 = pd.Series([-10.0 ** 20]).apply(str)
expected_histogram5 = {
'bin_counts': np.array([1]),
'bin_edges': np.array([-10.0 ** 20, -10.0 ** 20]),
}
list_data_test.append([df5, expected_histogram5])
for i, (df, expected_histogram) in enumerate(list_data_test):
profiler = FloatColumn(df.name)
profiler.update(df)
profile = profiler.profile
histogram = profile['histogram']
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
if i != 4:
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
else: # for overflow, don't use np.round
self.assertCountEqual(expected_histogram['bin_edges'],
histogram['bin_edges'])
def test_profile_histogram_w_updates(self):
"""
Checks if histogram properly resets the _profiled histogram after
merge or update.
:return:
"""
list_data_test = []
# this data has 4 bins, range of 3
# with equal bin size, each bin has the width of 0.75
df1 = pd.Series(["1.0", "2.0", "3.0", "4.0"])
expected_histogram1 = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([1.0, 1.75, 2.5, 3.25, 4.0]),
}
list_data_test.append([df1, expected_histogram1])
# this data will be the second update of the profile.
# this results in the combination of the previous data and this data.
# the range should update to 12 from 3.
df2 = pd.Series(["1.0", "5.0", "8.0", "13.0"])
expected_histogram2 = {
'bin_counts': np.array([4, 1, 1, 1, 0, 1]),
'bin_edges': np.array([1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0]),
}
list_data_test.append([df2, expected_histogram2])
profiler = FloatColumn("test")
for i, (df, expected_histogram) in enumerate(list_data_test):
profiler.update(df)
self.assertIsNone(profiler.histogram_selection)
profile = profiler.profile
self.assertIsNotNone(profiler.histogram_selection)
histogram = profile['histogram']
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
# apply test to merging profiles
expected_histogram = {
'bin_edges': np.array([1., 19/7, 31/7, 43/7, 55/7, 67/7, 79/7,
13.]),
'bin_counts': np.array([6, 4, 2, 0, 2, 0, 2])
}
merged_profiler = profiler + profiler
self.assertIsNone(merged_profiler.histogram_selection)
profile = merged_profiler.profile
self.assertIsNotNone(merged_profiler.histogram_selection)
histogram = profile['histogram']
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
def test_histogram_with_varying_number_of_bin(self):
"""
Checks the histogram with large number of bins
"""
# this data use number of bins less than the max limit
df1 = pd.Series([1, 2, 3, 4]).apply(str)
profiler1 = FloatColumn(df1.name)
profiler1.max_histogram_bin = 50
profiler1.update(df1)
num_bins = len(profiler1.profile['histogram']['bin_counts'])
self.assertEqual(4, num_bins)
# this data uses large number of bins, which will be set to
# the max limit
df2 = pd.Series([3.195103249264023e+18, 9999995.0, 9999999.0,
0.0, -10 ** 10]).apply(str)
profiler2 = FloatColumn(df2.name)
profiler2.max_histogram_bin = 50
profiler2.update(df2)
num_bins = len(profiler2.profile['histogram']['bin_counts'])
self.assertEqual(50, num_bins)
# max number of bin is increased to 10000
profiler2 = FloatColumn(df2.name)
profiler2.max_histogram_bin = 10000
profiler2.update(df2)
num_bins = len(profiler2.profile['histogram']['bin_counts'])
self.assertEqual(10000, num_bins)
def test_estimate_stats_from_histogram(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = \
np.array([1, 2, 1])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0, 7.0])
expected_mean = (2.0 * 1 + 4.0 * 2 + 6.0 * 1) / 4
expected_var = (1 * (2.0 - expected_mean) ** 2
+ 2 * (4.0 - expected_mean) ** 2
+ 1 * (6.0 - expected_mean) ** 2) / 4
expected_std = np.sqrt(expected_var)
est_var = profiler._estimate_stats_from_histogram()
self.assertEqual(expected_var, est_var)
def test_total_histogram_bin_variance(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = \
np.array([3, 2, 1])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0, 7.0])
input_array = np.array([1.1, 1.5, 2.3, 3.5, 4.0, 6.5])
expected_total_var = np.array([1.1, 1.5, 2.3]).var() \
+ np.array([3.5, 4.0]).var() \
+ np.array([6.5]).var()
est_total_var = profiler._total_histogram_bin_variance(input_array)
self.assertEqual(expected_total_var, est_total_var)
def test_histogram_loss(self):
# run time is small
diff_var, avg_diffvar, total_var, avg_totalvar, run_time, avg_runtime =\
0.3, 0.2, 0.1, 0.05, 0.0014, 0.0022
expected_loss = 0.1 / 0.2 + 0.05 / 0.05
est_loss = FloatColumn._histogram_loss(
diff_var, avg_diffvar, total_var, avg_totalvar, run_time,
avg_runtime)
self.assertEqual(expected_loss, est_loss)
# run time is big
diff_var, avg_diffvar, total_var, avg_totalvar, run_time, avg_runtime =\
0.3, 0.2, 0.1, 0.05, 22, 14
expected_loss = 0.1 / 0.2 + 0.05 / 0.05 + 8 / 14
est_loss = FloatColumn._histogram_loss(
diff_var, avg_diffvar, total_var, avg_totalvar, run_time,
avg_runtime)
self.assertEqual(expected_loss, est_loss)
def test_select_method_for_histogram(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
list_method = ['auto', 'fd', 'doane', 'scott', 'rice', 'sturges',
'sqrt']
current_exact_var = 0
# sqrt has the least current loss
current_est_var = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.005])
current_total_var = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
current_run_time = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
# all methods have the same total loss
list_total_loss = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
for i, method in enumerate(list_method):
profiler.histogram_methods[method]['total_loss'] = \
list_total_loss[i]
selected_method = profiler._select_method_for_histogram(
current_exact_var, current_est_var,
current_total_var, current_run_time)
self.assertEqual(selected_method, 'sqrt')
# another test
current_exact_var = 0
# sqrt has the least current loss
current_est_var = np.array([0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.029])
current_total_var = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
current_run_time = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
# but sturges has the least total loss
list_total_loss = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.05, 0.1])
for i, method in enumerate(list_method):
profiler.histogram_methods[method]['total_loss'] = \
list_total_loss[i]
selected_method = profiler._select_method_for_histogram(
current_exact_var, current_est_var,
current_total_var, current_run_time)
self.assertEqual(selected_method, 'sturges')
def test_histogram_to_array(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = \
np.array([3, 2, 1])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0, 7.0])
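        # _histogram_to_array appears to expand counts back into points at each bin's left edge
        # (three 1.0s, two 3.0s), with the final point placed at the overall max edge, which is
        # why the expected array ends in 7.0 rather than 5.0.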
array_from_histogram = profiler._histogram_to_array()
expected_array = [1.0, 1.0, 1.0, 3.0, 3.0, 7.0]
self.assertEqual(expected_array, array_from_histogram.tolist())
def test_merge_histogram(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = np.array([3, 2])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0])
input_array = [0.5, 1.0, 2.0, 5.0]
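        # Merging [0.5, 1.0, 2.0, 5.0] into the stored counts [3, 2] over edges [1.0, 3.0, 5.0]
        # yields 9 points spanning [0.5, 5.0]; the sqrt rule then re-bins them into 3 equal-width
        # bins of width 1.5, matching the expected counts and edges below.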
profiler._merge_histogram(input_array)
merged_hist = profiler._histogram_for_profile('sqrt')[0]
expected_bin_counts, expected_bin_edges = \
[5, 2, 2], [0.5, 2.0, 3.5, 5.0]
self.assertEqual(expected_bin_counts,
merged_hist['bin_counts'].tolist())
self.assertCountEqual(expected_bin_edges, merged_hist['bin_edges'])
def test_profiled_quantiles(self):
"""
Checks the quantiles of profiled numerical columns.
:return:
"""
# this data has 4 bins, range of 3
# with equal bin size, each bin has the width of 0.75
data = ["1.0", "2.0", "3.0", "4.0"]
df = pd.Series(data)
profiler = FloatColumn(df.name)
profiler.update(df)
profile = profiler.profile
est_quantiles = profile['quantiles']
est_Q1 = est_quantiles[249]
est_Q2 = est_quantiles[499]
est_Q3 = est_quantiles[749]
self.assertEqual(999, len(est_quantiles))
self.assertAlmostEqual(1.000012, est_quantiles[0])
self.assertEqual(est_Q1, 1.003)
self.assertEqual(est_Q2, 2.5)
self.assertEqual(est_Q3, 3.001)
self.assertAlmostEqual(3.999988, est_quantiles[-1])
def test_data_type_ratio(self):
data = np.linspace(-5, 5, 4)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.data_type_ratio, 1.0)
df = pd.Series(['not a float'])
profiler.update(df)
self.assertEqual(profiler.data_type_ratio, 0.8)
def test_profile(self):
data = [2.5, 12.5, 'not a float', 5, 'not a float']
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
expected_profile = dict(
min=2.5,
max=12.5,
mode=[2.5, 5, 12.5],
median=5,
sum=20.0,
mean=20/3.0,
variance=27 + 1/12.0,
skewness=35/13*np.sqrt(3/13),
kurtosis=np.nan,
            num_negatives=0,
            num_zeros=0,
stddev=np.sqrt(27+1/12.0),
histogram={
'bin_counts': np.array([1, 1, 0, 1]),
'bin_edges': np.array([2.5, 5.0, 7.5, 10.0, 12.5]),
},
quantiles={
0: 2.5075,
                1: 5.005,
2: 12.4925,
},
times=defaultdict(float, {'histogram_and_quantiles': 1.0,
'precision': 1.0, 'max': 1.0, 'min': 1.0,
'skewness': 1.0,
'kurtosis': 1.0, 'sum': 1.0, 'variance': 1.0,
'num_zeros': 1.0, 'num_negatives': 1.0}),
precision={
'min': 1,
'max': 3,
'mean': 2.0,
'var': 1.0,
'std': 1.0,
'sample_size': 3,
'margin_of_error': 1.9,
'confidence_level': 0.999
}
)
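        # Sanity check on the expected values above (three valid floats: 2.5, 12.5, 5):
        #   mean = (2.5 + 12.5 + 5) / 3 = 20/3
        #   variance uses the sample (n - 1) denominator:
        #     ((2.5 - 20/3)**2 + (12.5 - 20/3)**2 + (5 - 20/3)**2) / 2 = 27 + 1/12
        #   precision appears to count significant digits per value: min 1 ("5"), max 3 ("12.5").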
time_array = [float(i) for i in range(100, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), profiler.profile['times'])
profiler.update(df)
profile = profiler.profile
# Validate mode
mode = profile.pop('mode')
expected_mode = expected_profile.pop('mode')
np.testing.assert_array_almost_equal(expected_mode, mode, decimal=2)
# pop out the histogram to test separately from the rest of the dict
# as we need comparison with some precision
histogram = profile.pop('histogram')
expected_histogram = expected_profile.pop('histogram')
quantiles = profile.pop('quantiles')
expected_quantiles = expected_profile.pop('quantiles')
median = profile.pop('median')
expected_median = expected_profile.pop('median')
skewness = profile.pop('skewness')
expected_skewness = expected_profile.pop('skewness')
variance = profile.pop('variance')
expected_variance = expected_profile.pop('variance')
self.assertDictEqual(expected_profile, profile)
self.assertDictEqual(expected_profile['precision'], profile['precision'])
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
self.assertAlmostEqual(expected_quantiles[0], quantiles[249])
self.assertAlmostEqual(expected_quantiles[1], quantiles[499])
self.assertAlmostEqual(expected_quantiles[2], quantiles[749])
self.assertAlmostEqual(expected_skewness, skewness)
self.assertAlmostEqual(expected_variance, variance)
self.assertAlmostEqual(expected_median, median, places=2)
# Validate time in datetime class has expected time after second update
profiler.update(df)
expected = defaultdict(float, {'min': 2.0, 'max': 2.0,
'sum': 2.0, 'variance': 2.0,
'precision': 2.0,
'histogram_and_quantiles': 2.0,
'skewness': 2.0, 'kurtosis': 2.0,
'num_negatives': 2.0,
'num_zeros': 2.0,})
self.assertEqual(expected, profiler.profile['times'])
def test_option_precision(self):
data = [1.1, 2.2, 3.3, 4.4]
df = pd.Series(data).apply(str)
# Turn off precision
options = FloatOptions()
options.set({"precision.is_enabled": False})
profiler = FloatColumn(df.name, options=options)
profiler.update(df)
self.assertEqual(None, profiler.precision['sample_size'])
# Turn on precision, check sample_size
options = FloatOptions()
options.set({"precision.is_enabled": True})
profiler = FloatColumn(df.name, options=options)
profiler.update(df)
self.assertEqual(4, profiler.precision['sample_size'])
# Turn on precision, set 0.5 sample_size
options = FloatOptions()
options.set({"precision.sample_ratio": 0.5})
profiler = FloatColumn(df.name, options=options)
profiler.update(df)
self.assertEqual(2, profiler.precision['sample_size'])
def test_option_timing(self):
data = [2.0, 12.5, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
options = FloatOptions()
options.set({"min.is_enabled": False})
profiler = FloatColumn(df.name, options=options)
time_array = [float(i) for i in range(100, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), profiler.profile['times'])
profiler.update(df)
# Validate the time in the datetime class has the expected time.
profile = profiler.profile
expected = defaultdict(float, {'max': 1.0, 'sum': 1.0,
'variance': 1.0,
'precision': 1.0, 'skewness': 1.0,
'kurtosis': 1.0, 'num_negatives': 1.0,
'num_zeros': 1.0,
'histogram_and_quantiles': 15.0})
self.assertCountEqual(expected, profile['times'])
# Validate time in datetime class has expected time after second update
profiler.update(df)
expected = defaultdict(float, {'max': 2.0, 'sum': 2.0,
'variance': 2.0,
'precision': 2.0, 'skewness': 2.0,
'kurtosis': 2.0, 'num_negatives': 2.0,
'num_zeros': 2.0,
'histogram_and_quantiles': 30.0})
self.assertCountEqual(expected, profiler.profile['times'])
def test_profile_merge(self):
data = [2.0, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
data2 = [10.0, 'not a float', 15.0, 'not a float']
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
expected_profile = dict(
min=2.0,
max=15.0,
mode=[2, 6, 10, 15],
sum=33.0,
mean=8.25,
variance=30.916666666666668,
stddev=np.sqrt(30.916),
skewness=918 * np.sqrt(3 / 371) / 371,
kurtosis=-16068/19663,
histogram={
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([2., 5.25, 8.5, 11.75, 15.])
},
)
profiler3 = profiler1 + profiler2
expected_histogram = expected_profile.pop('histogram')
profile3 = profiler3.profile
histogram = profile3.pop('histogram')
expected_mode = expected_profile.pop('mode')
mode = profile3.pop('mode')
np.testing.assert_array_almost_equal(expected_mode, mode, decimal=2)
self.assertTrue(profiler3.bias_correction)
self.assertAlmostEqual(profiler3.stddev,
expected_profile.pop('stddev'), places=3)
self.assertAlmostEqual(profiler3.variance,
expected_profile.pop('variance'), places=3)
self.assertAlmostEqual(profiler3.skewness,
expected_profile.pop('skewness'),places=3)
self.assertAlmostEqual(profiler3.kurtosis,
expected_profile.pop('kurtosis'), places=3)
self.assertEqual(profiler3.mean, expected_profile.pop('mean'))
self.assertEqual(profiler3.histogram_selection, 'doane')
self.assertEqual(profiler3.min, expected_profile.pop('min'))
self.assertEqual(profiler3.max, expected_profile.pop('max'))
self.assertEqual(histogram['bin_counts'].tolist(),
expected_histogram['bin_counts'].tolist())
self.assertCountEqual(histogram['bin_edges'],
expected_histogram['bin_edges'])
def test_profile_merge_for_zeros_and_negatives(self):
data = [2.0, 8.5, 'not an int', 6.0, -3, 0]
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
data2 = [0.0, 3.5, 'not an int', 125.0, 0, -0.1, -88]
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
expected_profile = dict(
num_zeros=3,
num_negatives=3
)
profiler3 = profiler1 + profiler2
self.assertEqual(profiler3.num_zeros,
expected_profile.pop('num_zeros'))
self.assertEqual(profiler3.num_negatives,
expected_profile.pop('num_negatives'))
def test_profile_merge_edge_case(self):
data = [2.0, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
profiler1.match_count = 0
data2 = [10.0, 'not a float', 15.0, 'not a float']
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
profiler3 = profiler1 + profiler2
self.assertEqual(profiler3.stddev, profiler2.stddev)
# test merge with empty data
df1 = pd.Series([], dtype=object)
profiler1 = FloatColumn("Float")
profiler1.update(df1)
df2 = pd.Series([], dtype=object)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
profiler = profiler1 + profiler2
self.assertTrue(np.isnan(profiler.skewness))
self.assertTrue(np.isnan(profiler.kurtosis))
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
df3 = pd.Series([2.0, 3.0]).apply(str)
profiler3 = FloatColumn("Float")
profiler3.update(df3)
profiler = profiler1 + profiler3
self.assertTrue(np.isnan(profiler.skewness))
self.assertTrue(np.isnan(profiler.kurtosis))
self.assertEqual(profiler.min, 2.0)
self.assertEqual(profiler.max, 3.0)
df4 = pd.Series([4.0, 5.0]).apply(str)
profiler4 = FloatColumn("Float")
profiler4.update(df4)
profiler = profiler3 + profiler4
self.assertEqual(profiler.skewness, 0)
self.assertAlmostEqual(profiler.kurtosis, -1.2)
self.assertEqual(profiler.min, 2.0)
self.assertEqual(profiler.max, 5.0)
self.assertEqual(profiler.num_zeros, 0)
self.assertEqual(profiler.num_negatives, 0)
df5 = pd.Series([0.0, 0.0, -1.1, -1.0]).apply(str)
profiler5 = FloatColumn("Float")
profiler5.update(df5)
profiler = profiler4 + profiler5
self.assertEqual(profiler.min, -1.1)
self.assertEqual(profiler.max, 5)
self.assertEqual(profiler.num_zeros, 2)
self.assertEqual(profiler.num_negatives, 2)
def test_custom_bin_count_merge(self):
options = FloatOptions()
options.histogram_and_quantiles.bin_count_or_method = 10
data = [2.0, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float", options)
profiler1.update(df)
data2 = [10.0, 'not a float', 15.0, 'not a float']
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float", options)
profiler2.update(df2)
# no warning should occur
with warnings.catch_warnings(record=True) as w:
merge_profile = profiler1 + profiler2
self.assertListEqual([], w)
self.assertEqual(10, merge_profile.user_set_histogram_bin)
# make bin counts different and get warning
profiler2.user_set_histogram_bin = 120
with self.assertWarnsRegex(UserWarning,
'User set histogram bin counts did not '
'match. Choosing the larger bin count.'):
merged_profile = profiler1 + profiler2
self.assertEqual(120, merged_profile.user_set_histogram_bin)
def test_profile_merge_no_bin_overlap(self):
data = [2.0, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
data2 = [10.0, 'not a float', 15.0, 'not a float']
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
# set bin names so no overlap
profiler1.histogram_bin_method_names = ['No overlap 1']
profiler2.histogram_bin_method_names = ['No overlap 2']
with self.assertRaisesRegex(ValueError,
'Profiles have no overlapping bin methods '
'and therefore cannot be added together.'):
profiler1 + profiler2
def test_profile_merge_with_different_options(self):
# Creating first profiler with default options
options = FloatOptions()
options.max.is_enabled = False
options.min.is_enabled = False
options.histogram_and_quantiles.bin_count_or_method = None
data = [2, 4, 6, 8]
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float", options=options)
profiler1.update(df)
# Creating second profiler with separate options
options = FloatOptions()
options.min.is_enabled = False
options.precision.is_enabled = False
options.histogram_and_quantiles.bin_count_or_method = None
data2 = [10, 15]
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float", options=options)
profiler2.update(df2)
# Asserting warning when adding 2 profilers with different options
with warnings.catch_warnings(record=True) as w:
profiler3 = profiler1 + profiler2
list_of_warning_messages = []
for warning in w:
list_of_warning_messages.append(str(warning.message))
warning1 = ("precision is disabled because it is not enabled in "
"both profiles.")
warning2 = ("max is disabled because it is not enabled in both "
"profiles.")
self.assertIn(warning1, list_of_warning_messages)
self.assertIn(warning2, list_of_warning_messages)
# Assert that these features are still merged
profile = profiler3.profile
self.assertEqual("doane", profiler3.histogram_selection)
self.assertEqual(21.5, profile['variance'])
self.assertEqual(45.0, profiler3.sum)
# Assert that these features are not calculated
self.assertIsNone(profiler3.max)
self.assertIsNone(profiler3.min)
self.assertEqual(None, profiler3.precision['min'])
self.assertEqual(None, profiler3.precision['max'])
# Creating profiler with precision to 0.1
options = FloatOptions()
options.max.is_enabled = False
options.min.is_enabled = False
options.histogram_and_quantiles.method = None
data = [2, 4, 6, 8]
df =
|
pd.Series(data)
|
pandas.Series
|
from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.base import DataError
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
# NOTE that these are yielded tests and so _create_data
# is explicitly called.
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
class Dtype:
window = 2
funcs = {
"count": lambda v: v.count(),
"max": lambda v: v.max(),
"min": lambda v: v.min(),
"sum": lambda v: v.sum(),
"mean": lambda v: v.mean(),
"std": lambda v: v.std(),
"var": lambda v: v.var(),
"median": lambda v: v.median(),
}
def get_expects(self):
expects = {
"sr1": {
"count": Series([1, 2, 2, 2, 2], dtype="float64"),
"max": Series([np.nan, 1, 2, 3, 4], dtype="float64"),
"min": Series([np.nan, 0, 1, 2, 3], dtype="float64"),
"sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"),
"mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
"std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"),
"var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"),
"median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
},
"sr2": {
"count": Series([1, 2, 2, 2, 2], dtype="float64"),
"max": Series([np.nan, 10, 8, 6, 4], dtype="float64"),
"min": Series([np.nan, 8, 6, 4, 2], dtype="float64"),
"sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"),
"mean":
|
Series([np.nan, 9, 7, 5, 3], dtype="float64")
|
pandas.Series
|
__author__ = "<NAME>"
"""
This module contains all the main code for the genetic algorithm component. It relies on the Non-dominated Sort Genetic
Algorithm-II, NSGA2, to evolve the development of the population.
"""
import random
from components import structures
from components import components as compos
from components import panels
from components import calculate_cpu_metric
from components import calculate_br_down_metric, calculate_br_up_metric, calculate_wavelength_metric
from components import parse_component
import numpy as np
import pandas as pd
MAX_RANDOM_SEARCHES = 10
SIDE_PANELS_TOTAL = 7
NUM_OF_COMPONENTS = len(compos) - 1
NUM_OF_STRUCTURES = len(structures) - 1
def create_population(pop_size):
"""
This is a function utilised to create the initial population
:param pop_size: Determines how large a sample of population should be created
    :return: Returns a 2D array of potential satellite configurations; the first dimension is the satellites, the
    second is the components that make up each satellite
"""
    # Build the population: choose a random structure for each satellite, then fill each structure out with
    # components (random internals, random externals).
    # The number of possible structure types depends on the db; generate a vector of random structure indices
    # to use below.
structures_pop = [random.randint(0, NUM_OF_STRUCTURES) for _ in range(pop_size)]
population = []
for i in range(pop_size):
satellite = {'Structure': structures['Name'][structures_pop[i]],
'Components': [],
'Panels': [],
'Metrics': np.array([], ndmin=1),
'Fitness': np.array([], ndmin=1),
'Details': np.array([structures['Internal Slots'][structures_pop[i]],
structures['Internal Slots'][structures_pop[i]],
structures['External Slots'][structures_pop[i]],
structures['External Slots'][structures_pop[i]]], ndmin=1)}
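        # Fill-loop guard: component slot values are presumably negative, so each appended component
        # shrinks Details[1]/Details[3]; 'under_one' caps how many attempts are made once less than
        # one full internal slot remains, so the loop is guaranteed to terminate.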
available_slots = satellite['Details'][1]
avail_ext_slots = satellite['Details'][3]
under_one = MAX_RANDOM_SEARCHES
while available_slots > 0:
component_num = random.randint(0, NUM_OF_COMPONENTS)
component = compos['Name'][component_num]
if available_slots + compos['Internal Slots'][component_num] > 0 \
and avail_ext_slots + compos['External Slots'][component_num] > 0:
satellite['Components'].append(component)
satellite['Details'][1] += compos['Internal Slots'][component_num]
satellite['Details'][3] += compos['External Slots'][component_num]
available_slots = satellite['Details'][1]
avail_ext_slots = satellite['Details'][3]
if 0 < available_slots < 1:
under_one -= 1
if under_one < 1:
available_slots = 0
side_panel = panels['Name'][random.randint(0, SIDE_PANELS_TOTAL-1)]
end_panel = panels['Name'][random.randint(SIDE_PANELS_TOTAL, len(panels) - 1)]
satellite['Panels'].append([side_panel, end_panel])
# Append the current satellite to the population
population.append(satellite)
# for satellite in population:
# print(satellite)
return population
def create_child_population(population):
"""
    This function takes the parent population and creates a child population of the same size via pairwise
    combination (crossover) of the parents' components.
:param population: 2D array of the same type created in the create_population function
    :return: Returns a 2D array of the same type as create_population
"""
c_pop = []
# Dividing the population in half, making child satellites pairwise from the two sub-populations
pop_a = population[:-(int(np.floor(len(population)/2)))]
pop_b = population[-(int(np.floor(len(population)/2))):]
if len(pop_a) != len(pop_b):
spare = pop_a.pop()
else:
spare = []
# Length of pop_a and pop_b will be the same
# Could refactor the internals of this loop out into dedicated functions, but task for later
for i in range(len(pop_a)):
sat_a = {'Structure': pop_a[i]['Structure'],
'Components': [],
'Panels': [],
'Metrics': np.array([], ndmin=1),
'Fitness': np.array([], ndmin=1),
'Details': np.array([pop_a[i]['Details'][0],
pop_a[i]['Details'][0],
pop_a[i]['Details'][2],
pop_a[i]['Details'][2]], ndmin=1)}
sat_b = {'Structure': pop_b[i]['Structure'],
'Components': [],
'Panels': [],
'Metrics': np.array([], ndmin=1),
'Fitness': np.array([], ndmin=1),
'Details': np.array([pop_b[i]['Details'][0],
pop_b[i]['Details'][0],
pop_b[i]['Details'][2],
pop_b[i]['Details'][2]], ndmin=1)}
comps = []
for comp in pop_a[i]['Components']:
comps.append(comp)
for comp in pop_b[i]['Components']:
comps.append(comp)
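        # Pool the components of both parents, then redistribute them between the two children by
        # drawing randomly from the pool until each child's slots are filled (falling back to
        # brand-new random components if the pool runs dry).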
# Retrieve the number of available slots for each structure
slots_a = sat_a['Details'][0]
ext_slots_a = sat_a['Details'][2]
slots_b = sat_b['Details'][0]
ext_slots_b = sat_b['Details'][2]
under_one = MAX_RANDOM_SEARCHES
while slots_a > 0:
if comps:
component = comps.pop(random.randint(0, len(comps) - 1))
component_num = np.where(compos['Name'] == component)[0][0]
if slots_a + compos['Internal Slots'][component_num] > 0 \
and ext_slots_a + compos['External Slots'][component_num] > 0:
sat_a['Components'].append(component)
sat_a['Details'][1] += compos['Internal Slots'][component_num]
sat_a['Details'][3] += compos['External Slots'][component_num]
slots_a = sat_a['Details'][1]
ext_slots_a = sat_a['Details'][3]
# Less than one counter
if 0 < slots_a < 1:
under_one -= 1
if under_one < 1:
slots_a = 0
else:
component_num = random.randint(0, NUM_OF_COMPONENTS)
component = compos['Name'][component_num]
if slots_a + compos['Internal Slots'][component_num] > 0 \
and ext_slots_a + compos['External Slots'][component_num] > 0:
sat_a['Components'].append(component)
sat_a['Details'][1] += compos['Internal Slots'][component_num]
sat_a['Details'][3] += compos['External Slots'][component_num]
slots_a = sat_a['Details'][1]
ext_slots_a = sat_a['Details'][3]
if 0 < slots_a < 1:
under_one -= 1
if under_one < 1:
slots_a = 0
# Repeat for sat_b
under_one = MAX_RANDOM_SEARCHES
while slots_b > 0:
if comps:
component = comps.pop(random.randint(0, len(comps) - 1))
component_num = np.where(compos['Name'] == component)[0][0]
if slots_b + compos['Internal Slots'][component_num] > 0 \
and ext_slots_b + compos['External Slots'][component_num] > 0:
sat_b['Components'].append(component)
sat_b['Details'][1] += compos['Internal Slots'][component_num]
sat_b['Details'][3] += compos['External Slots'][component_num]
slots_b = sat_b['Details'][1]
ext_slots_b = sat_b['Details'][3]
# Less than one counter
if 0 < slots_b < 1:
under_one -= 1
if under_one < 1:
slots_b = 0
else:
component_num = random.randint(0, NUM_OF_COMPONENTS)
component = compos['Name'][component_num]
if slots_b + compos['Internal Slots'][component_num] > 0 \
and ext_slots_b + compos['External Slots'][component_num] > 0:
sat_b['Components'].append(component)
sat_b['Details'][1] += compos['Internal Slots'][component_num]
sat_b['Details'][3] += compos['External Slots'][component_num]
slots_b = sat_b['Details'][1]
ext_slots_b = sat_b['Details'][3]
if 0 < slots_b < 1:
under_one -= 1
if under_one < 1:
slots_b = 0
# Randomly select external panels
if random.randint(0, 1) == 0:
sat_a['Panels'] = pop_a[i]['Panels']
sat_b['Panels'] = pop_b[i]['Panels']
else:
sat_a['Panels'] = pop_b[i]['Panels']
sat_b['Panels'] = pop_a[i]['Panels']
# append to child population
c_pop.append(sat_a)
c_pop.append(sat_b)
if spare:
c_pop.append(spare)
# Return child population
return c_pop
def mutate_satellite(satellite, structure_mut_rate):
"""
This function mutates a satellite by generating a new component for the satellite and then filling up the spare
space with the previous components, dropping the remainder. If a new structure is generated, it fills up the
available space with previous components and then fills up the remainder with randomly retrieved components
:param satellite: The satellite to be mutated
:param structure_mut_rate: The chance that the component to be mutated is the structure
:return: The mutated satellite
"""
if random.random() < structure_mut_rate:
# Structure is mutated
structure_num = random.randint(0, NUM_OF_STRUCTURES)
satellite['Structure'] = structures['Name'][structure_num]
satellite['Details'] = np.array([structures['Internal Slots'][structure_num],
structures['Internal Slots'][structure_num],
structures['External Slots'][structure_num],
structures['External Slots'][structure_num]], ndmin=1)
new_comp = []
else:
new_comp = compos['Name'][random.randint(0, NUM_OF_COMPONENTS)]
comps = satellite['Components']
if new_comp:
comps.append(new_comp)
satellite['Components'] = []
available_slots = satellite['Details'][0]
satellite['Details'][1] = satellite['Details'][0]
avail_ext_slots = satellite['Details'][2]
satellite['Details'][3] = satellite['Details'][2]
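    # Refill the satellite: reuse the previous components (plus any newly generated one) first,
    # then pad with randomly drawn components once the old list is exhausted.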
under_one = MAX_RANDOM_SEARCHES
while available_slots > 0:
if comps:
component = comps.pop()
component_num = np.where(compos['Name'] == component)[0][0]
if available_slots + compos['Internal Slots'][component_num] > 0 \
and avail_ext_slots + compos['External Slots'][component_num] > 0:
satellite['Components'].append(component)
satellite['Details'][1] += compos['Internal Slots'][component_num]
satellite['Details'][3] += compos['External Slots'][component_num]
available_slots = satellite['Details'][1]
avail_ext_slots = satellite['Details'][3]
if 0 < available_slots < 1:
under_one -= 1
if under_one < 1:
available_slots = 0
else:
component_num = random.randint(0, NUM_OF_COMPONENTS)
component = compos['Name'][component_num]
if available_slots + compos['Internal Slots'][component_num] > 0 \
and avail_ext_slots + compos['External Slots'][component_num] > 0:
satellite['Components'].append(component)
satellite['Details'][1] += compos['Internal Slots'][component_num]
satellite['Details'][3] += compos['External Slots'][component_num]
available_slots = satellite['Details'][1]
avail_ext_slots = satellite['Details'][3]
if 0 < available_slots < 1:
under_one -= 1
if under_one < 1:
available_slots = 0
# Clearing the metrics, since they will be different
satellite['Metrics'] = np.array([], ndmin=1)
return satellite
def population_union(population_one, population_two):
"""
    This function takes two populations and merges them into a single combined population.
:param population_one: A population of any size
:param population_two: A different population of any size
:return: P_1 U P_2
"""
return population_one + population_two
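# A typical generation step using the helpers above might look like the following sketch
# (hypothetical driver code; the NSGA-II non-dominated sort/selection that trims the union
# back down to pop_size lives elsewhere):
#   parents = create_population(50)
#   children = [mutate_satellite(sat, structure_mut_rate=0.1)
#               for sat in create_child_population(parents)]
#   combined = population_union(parents, children)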
def calculate_satellite_metrics(satellite):
"""
    This function takes a satellite structure that has been created and evaluates its metrics. It updates the
    Metrics array of the satellite before returning it
:param satellite: The satellite structure
:return: The satellite structure with the metrics array calculated
"""
# print(satellite)
size = pd.DataFrame(structures.loc[np.where(structures['Name'] ==
satellite['Structure'])[0][0]]).T['Size'].values[0]
comps = satellite['Components']
values = np.array([])
for comp in comps:
comp_num = np.where(compos['Name'] == comp)[0][0]
if not values.any():
values = parse_component(
|
pd.DataFrame(compos.loc[comp_num])
|
pandas.DataFrame
|
# pylint: disable-msg=E1101,W0612
from __future__ import with_statement # for Python 2.5
from datetime import datetime, time, timedelta
import sys
import os
import unittest
import nose
import numpy as np
from pandas import (Index, Series, TimeSeries, DataFrame, isnull,
date_range, Timestamp)
from pandas import DatetimeIndex, Int64Index, to_datetime
from pandas.core.daterange import DateRange
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
from pandas.tseries.index import bdate_range, date_range
import pandas.tseries.tools as tools
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
import pandas.lib as lib
import cPickle as pickle
import pandas.core.datetools as dt
from numpy.random import rand
from pandas.util.testing import assert_frame_equal
import pandas.util.py3compat as py3compat
from pandas.core.datetools import BDay
import pandas.core.common as com
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest
try:
import pytz
except ImportError:
pass
class TestTimeZoneSupport(unittest.TestCase):
def setUp(self):
_skip_if_no_pytz()
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
# Values are unmodified
self.assert_(np.array_equal(rng.asi8, rng_eastern.asi8))
self.assert_(rng_eastern.tz == pytz.timezone('US/Eastern'))
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize('US/Eastern')
expected_naive = rng + offsets.Hour(5)
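        # On 3/10-3/11/2012 US/Eastern is still on standard time (UTC-5), so the localized index,
        # viewed in UTC, should match the naive range shifted forward by 5 hours.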
self.assert_(np.array_equal(converted.asi8, expected_naive.asi8))
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
self.assertRaises(Exception, rng.tz_localize, 'US/Eastern')
def test_tz_localize_dti(self):
from pandas.tseries.offsets import Hour
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize('US/Eastern')
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L',
tz='utc')
self.assert_(np.array_equal(dti2.values, dti_utc.values))
dti3 = dti2.tz_convert('US/Pacific')
self.assert_(np.array_equal(dti3.values, dti_utc.values))
dti = DatetimeIndex(start='11/6/2011 1:59',
end='11/6/2011 2:00', freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
'US/Eastern')
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
tz = pytz.timezone('US/Eastern')
expected = tz.normalize(rng[-1])
stamp = rng_eastern[-1]
self.assertEquals(stamp, expected)
self.assertEquals(stamp.tzinfo, expected.tzinfo)
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert('US/Eastern')
self.assert_('EDT' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz='US/Eastern')
conv = idx[0].tz_convert('US/Pacific')
expected = idx.tz_convert('US/Pacific')[0]
self.assertEquals(conv, expected)
def test_pass_dates_convert_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_convert('US/Eastern')
fromdates = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_(conv.tz == fromdates.tz)
self.assert_(np.array_equal(conv.values, fromdates.values))
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz='US/Eastern')
self.assert_((rng.hour == 0).all())
def test_with_tz(self):
tz = pytz.timezone('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=datetools.Hour())
self.assert_(dr.tz is pytz.utc)
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
self.assert_(central.tz is tz)
self.assert_(central[0].tz is tz)
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
self.assertRaises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
self.assert_(np.array_equal(dr_utc, localized))
def test_with_tz_ambiguous_times(self):
tz = pytz.timezone('US/Eastern')
rng = bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
# regular no problem
self.assert_(rng.tz_validate())
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=datetools.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=datetools.Hour())
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=datetools.Minute(30), tz=pytz.utc)
# test utility methods
def test_infer_tz(self):
eastern = pytz.timezone('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = eastern.localize(_start)
end = eastern.localize(_end)
assert(tools._infer_tzinfo(start, end) is eastern)
assert(tools._infer_tzinfo(start, None) is eastern)
assert(tools._infer_tzinfo(None, end) is eastern)
start = utc.localize(_start)
end = utc.localize(_end)
assert(tools._infer_tzinfo(start, end) is utc)
end = eastern.localize(_end)
self.assertRaises(Exception, tools._infer_tzinfo, start, end)
self.assertRaises(Exception, tools._infer_tzinfo, end, start)
def test_asobject_tz_box(self):
tz = pytz.timezone('US/Eastern')
index = DatetimeIndex(start='1/1/2005', periods=10, tz=tz,
freq='B')
result = index.asobject
self.assert_(result[0].tz is tz)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10, tz='US/Eastern')
expected = date_range('1/1/2000', periods=10,
tz=pytz.timezone('US/Eastern'))
self.assert_(result.equals(expected))
def test_take_dont_lose_meta(self):
_skip_if_no_pytz()
rng = date_range('1/1/2000', periods=20, tz='US/Eastern')
result = rng.take(range(5))
self.assert_(result.tz == rng.tz)
self.assert_(result.freq == rng.freq)
def test_index_with_timezone_repr(self):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_convert('US/Eastern')
rng_repr = repr(rng)
self.assert_('2010-04-13 00:00:00' in rng_repr)
class TestTimeZones(unittest.TestCase):
def setUp(self):
_skip_if_no_pytz()
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H',
tz='US/Eastern')
self.assert_(not left.equals(right))
def test_tz_convert_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_convert('US/Pacific')
exp = rng.tz_localize('US/Pacific')
self.assert_(conv.equals(exp))
def test_tz_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H')
ts = Series(1, index=rng)
result = ts.tz_convert('utc')
self.assert_(result.index.tz.zone == 'UTC')
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('utc')
expected = DataFrame({'a': 1}, rng.tz_convert('UTC'))
self.assert_(result.index.tz.zone == 'UTC')
assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('utc', axis=1)
self.assert_(result.columns.tz.zone == 'UTC')
assert_frame_equal(result, expected.T)
def test_join_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng.tz_convert('US/Eastern')
right = rng.tz_convert('Europe/Berlin')
for how in ['inner', 'outer', 'left', 'right']:
result = left.join(left[:-5], how=how)
self.assert_(isinstance(result, DatetimeIndex))
self.assert_(result.tz == left.tz)
result = left.join(right[:-5], how=how)
self.assert_(isinstance(result, DatetimeIndex))
self.assert_(result.tz.zone == 'UTC')
def test_arith_utc_convert(self):
rng =
|
date_range('1/1/2011', periods=100, freq='H', tz='utc')
|
pandas.tseries.index.date_range
|
def test_get_number_rows_cols_for_fig():
from mspypeline.helpers import get_number_rows_cols_for_fig
assert get_number_rows_cols_for_fig([1, 1, 1, 1]) == (2, 2)
assert get_number_rows_cols_for_fig(4) == (2, 2)
def test_fill_dict():
from mspypeline.helpers import fill_dict
def test_default_to_regular():
from mspypeline.helpers import default_to_regular
from collections import defaultdict
d = defaultdict(int)
d["a"] += 1
assert isinstance(d, defaultdict)
d = default_to_regular(d)
assert isinstance(d, dict)
assert not isinstance(d, defaultdict)
def test_get_analysis_design():
from mspypeline.helpers import get_analysis_design
assert get_analysis_design(["A1_1", "A1_2", "A2_1", "A2_2"]) == {
'A1': {'1': 'A1_1', '2': 'A1_2'},
'A2': {'1': 'A2_1', '2': 'A2_2'}
}
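    # Sample names appear to be split on '_' into nested dict levels, with the leaf mapping back
    # to the full sample name (see also the single-sample case below).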
assert get_analysis_design(["A_1_1"]) == {"A": {"1": {"1": "A_1_1"}}}
def test_plot_annotate_line():
from mspypeline.helpers import plot_annotate_line
def test_venn_names():
from mspypeline.helpers import venn_names
def test_install_r_dependencies():
from mspypeline.helpers.Utils import install_r_dependencies
def test_get_number_of_non_na_values():
from mspypeline.helpers import get_number_of_non_na_values as gna
assert gna(20) > gna(10) > gna(5) > gna(3)
assert gna(3) == gna(2) and gna(3) == gna(1)
def test_get_intersection_and_unique():
from mspypeline.helpers import get_intersection_and_unique
import pandas as pd
df1 = pd.DataFrame()
df2 = pd.DataFrame()
assert all(map(pd.Series.equals,
get_intersection_and_unique(df1, df2),
(pd.Series([], dtype=bool), pd.Series([], dtype=bool), pd.Series([], dtype=bool))))
df1 = pd.DataFrame([[1, 1, 1], [1, 1, 1], [0, 0, 0], [1, 0, 0]])
df2 = pd.DataFrame([[1, 1, 1], [0, 0, 0], [1, 1, 1], [1, 0, 0]])
assert all(map(
pd.Series.equals,
get_intersection_and_unique(df1, df2),
(pd.Series([1, 0, 0, 0], dtype=bool), pd.Series([0, 1, 0, 0], dtype=bool),
|
pd.Series([0, 0, 1, 0], dtype=bool)
|
pandas.Series
|
"""
Unit test for smart explainer
"""
import unittest
from unittest.mock import patch, Mock
import os
from os import path
from pathlib import Path
import types
import pandas as pd
import numpy as np
import catboost as cb
from sklearn.linear_model import LinearRegression
from shapash.explainer.smart_explainer import SmartExplainer
from shapash.explainer.multi_decorator import MultiDecorator
from shapash.explainer.smart_state import SmartState
import category_encoders as ce
import shap
def init_sme_to_pickle_test():
"""
Init sme to pickle test
TODO: Docstring
Returns
-------
[type]
[description]
"""
current = Path(path.abspath(__file__)).parent.parent.parent
pkl_file = path.join(current, 'data/xpl.pkl')
xpl = SmartExplainer()
contributions = pd.DataFrame([[-0.1, 0.2, -0.3], [0.1, -0.2, 0.3]])
y_pred = pd.DataFrame(data=np.array([1, 2]), columns=['pred'])
dataframe_x = pd.DataFrame([[1, 2, 3], [1, 2, 3]])
xpl.compile(contributions=contributions, x=dataframe_x, y_pred=y_pred, model=LinearRegression())
xpl.filter(max_contrib=2)
return pkl_file, xpl
class TestSmartExplainer(unittest.TestCase):
"""
Unit test smart explainer
TODO: Docstring
"""
def test_init(self):
"""
test init smart explainer
"""
xpl = SmartExplainer()
assert hasattr(xpl, 'plot')
def assertRaisesWithMessage(self, msg, func, *args, **kwargs):
try:
func(*args, **kwargs)
            self.fail()
except Exception as inst:
self.assertEqual(inst.args[0]['message'], msg)
@patch('shapash.explainer.smart_explainer.SmartState')
def test_choose_state_1(self, mock_smart_state):
"""
Unit test choose state 1
Parameters
----------
mock_smart_state : [type]
[description]
"""
xpl = SmartExplainer()
xpl.choose_state('contributions')
mock_smart_state.assert_called()
@patch('shapash.explainer.smart_explainer.MultiDecorator')
def test_choose_state_2(self, mock_multi_decorator):
"""
Unit test choose state 2
Parameters
----------
mock_multi_decorator : [type]
[description]
"""
xpl = SmartExplainer()
xpl.choose_state([1, 2, 3])
mock_multi_decorator.assert_called()
def test_validate_contributions_1(self):
"""
Unit test validate contributions 1
"""
xpl = SmartExplainer()
contributions = [
np.array([[2, 1], [8, 4]]),
np.array([[5, 5], [0, 0]])
]
model = Mock()
model._classes = np.array([1, 3])
model.predict = types.MethodType(self.predict, model)
model.predict_proba = types.MethodType(self.predict_proba, model)
xpl.model = model
xpl._case = "classification"
xpl._classes = list(model._classes)
xpl.state = xpl.choose_state(contributions)
xpl.x_init = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
expected_output = [
pd.DataFrame(
[[2, 1], [8, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
),
pd.DataFrame(
[[5, 5], [0, 0]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
]
output = xpl.validate_contributions(contributions)
assert len(expected_output) == len(output)
test_list = [pd.testing.assert_frame_equal(e, m) for e, m in zip(expected_output, output)]
assert all(x is None for x in test_list)
def test_apply_preprocessing_1(self):
"""
Unit test apply preprocessing 1
"""
xpl = SmartExplainer()
contributions = [1, 2, 3]
output = xpl.apply_preprocessing(contributions)
expected = contributions
self.assertListEqual(output, expected)
def test_apply_preprocessing_2(self):
"""
Unit test apply preprocessing 2
"""
xpl = SmartExplainer()
xpl.state = Mock()
preprocessing = Mock()
contributions = [1, 2, 3]
xpl.apply_preprocessing(contributions, preprocessing)
xpl.state.inverse_transform_contributions.assert_called()
def test_modify_postprocessing_1(self):
"""
Unit test modify postprocessing 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
        xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {0: {'type' : 'suffix', 'rule':' t'},
'Column2': {'type' : 'prefix', 'rule' : 'test'}}
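        # Keys may be given as a column position (0) or as a display name ('Column2'); the method
        # is expected to normalize both to the raw column names, as the expected output shows.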
expected_output = {
'Col1': {'type' : 'suffix', 'rule':' t'},
'Col2': {'type' : 'prefix', 'rule' : 'test'}
}
output = xpl.modify_postprocessing(postprocessing)
assert output == expected_output
def test_modify_postprocessing_2(self):
"""
Unit test modify postprocessing 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {'Error': {'type': 'suffix', 'rule': ' t'}}
with self.assertRaises(ValueError):
xpl.modify_postprocessing(postprocessing)
def test_check_postprocessing_1(self):
"""
Unit test check_postprocessing
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing1 = {0: {'Error': 'suffix', 'rule': ' t'}}
postprocessing2 = {0: {'type': 'Error', 'rule': ' t'}}
postprocessing3 = {0: {'type': 'suffix', 'Error': ' t'}}
postprocessing4 = {0: {'type': 'suffix', 'rule': ' '}}
postprocessing5 = {0: {'type': 'case', 'rule': 'lower'}}
postprocessing6 = {0: {'type': 'case', 'rule': 'Error'}}
with self.assertRaises(ValueError):
xpl.check_postprocessing(postprocessing1)
xpl.check_postprocessing(postprocessing2)
xpl.check_postprocessing(postprocessing3)
xpl.check_postprocessing(postprocessing4)
xpl.check_postprocessing(postprocessing5)
xpl.check_postprocessing(postprocessing6)
def test_apply_postprocessing_1(self):
"""
Unit test apply_postprocessing 1
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
assert np.array_equal(xpl.x_pred, xpl.apply_postprocessing())
def test_apply_postprocessing_2(self):
"""
Unit test apply_postprocessing 2
"""
xpl = SmartExplainer()
xpl.x_pred = pd.DataFrame(
[[1, 2],
[3, 4]],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
xpl.features_dict = {'Col1': 'Column1', 'Col2': 'Column2'}
xpl.columns_dict = {0: 'Col1', 1: 'Col2'}
xpl.inv_features_dict = {'Column1': 'Col1', 'Column2': 'Col2'}
postprocessing = {'Col1': {'type': 'suffix', 'rule': ' t'},
'Col2': {'type': 'prefix', 'rule': 'test'}}
expected_output = pd.DataFrame(
data=[['1 t', 'test2'],
['3 t', 'test4']],
columns=['Col1', 'Col2'],
index=['Id1', 'Id2']
)
output = xpl.apply_postprocessing(postprocessing)
assert np.array_equal(output, expected_output)
def test_check_contributions_1(self):
"""
Unit test check contributions 1
"""
xpl = SmartExplainer()
xpl.contributions, xpl.x_pred = Mock(), Mock()
xpl.state = Mock()
xpl.check_contributions()
xpl.state.check_contributions.assert_called_with(xpl.contributions, xpl.x_pred)
def test_check_contributions_2(self):
"""
Unit test check contributions 2
"""
xpl = SmartExplainer()
xpl.contributions, xpl.x_pred = Mock(), Mock()
mock_state = Mock()
mock_state.check_contributions.return_value = False
xpl.state = mock_state
with self.assertRaises(ValueError):
xpl.check_contributions()
def test_check_label_dict_1(self):
"""
Unit test check label dict 1
"""
xpl = SmartExplainer(label_dict={1: 'Yes', 0: 'No'})
xpl._classes = [0, 1]
xpl._case = 'classification'
xpl.check_label_dict()
def test_check_label_dict_2(self):
"""
Unit test check label dict 2
"""
xpl = SmartExplainer()
xpl._case = 'regression'
xpl.check_label_dict()
def test_check_features_dict_1(self):
"""
Unit test check features dict 1
"""
xpl = SmartExplainer(features_dict={'Age': 'Age (Years Old)'})
xpl.columns_dict = {0: 'Age', 1: 'Education', 2: 'Sex'}
xpl.check_features_dict()
assert xpl.features_dict['Age'] == 'Age (Years Old)'
assert xpl.features_dict['Education'] == 'Education'
@patch('shapash.explainer.smart_explainer.SmartExplainer.choose_state')
@patch('shapash.explainer.smart_explainer.SmartExplainer.apply_preprocessing')
def test_compile_0(self, mock_apply_preprocessing, mock_choose_state):
"""
Unit test compile
Parameters
----------
mock_apply_preprocessing : [type]
[description]
mock_choose_state : [type]
[description]
"""
xpl = SmartExplainer()
mock_state = Mock()
mock_choose_state.return_value = mock_state
model = lambda: None
model.predict = types.MethodType(self.predict, model)
mock_state.rank_contributions.return_value = 1, 2, 3
contributions = pd.DataFrame([[-0.1, 0.2, -0.3], [0.1, -0.2, 0.3]])
mock_state.validate_contributions.return_value = contributions
mock_apply_preprocessing.return_value = contributions
x_pred = pd.DataFrame([[1, 2, 3], [1, 2, 3]])
xpl.compile(x=x_pred, model=model, contributions=contributions)
assert hasattr(xpl, 'state')
assert xpl.state == mock_state
assert hasattr(xpl, 'x_pred')
pd.testing.assert_frame_equal(xpl.x_pred, x_pred)
assert hasattr(xpl, 'contributions')
pd.testing.assert_frame_equal(xpl.contributions, contributions)
mock_choose_state.assert_called()
mock_state.validate_contributions.assert_called()
mock_apply_preprocessing.assert_called()
mock_state.rank_contributions.assert_called()
assert xpl._case == "regression"
def test_compile_1(self):
"""
Unit test compile 1
checking compile method without model
"""
df = pd.DataFrame(range(0, 21), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 10 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = np.random.randint(1, 3, df.shape[0])
df = df.set_index('id')
clf = cb.CatBoostClassifier(n_estimators=1).fit(df[['x1', 'x2']], df['y'])
xpl = SmartExplainer()
xpl.compile(model=clf, x=df[['x1', 'x2']])
assert xpl._case == "classification"
self.assertListEqual(xpl._classes, [0, 1])
def test_compile_2(self):
"""
Unit test compile 2
checking new attributes added to the compile method
"""
df = pd.DataFrame(range(0, 5), columns=['id'])
df['y'] = df['id'].apply(lambda x: 1 if x < 2 else 0)
df['x1'] = np.random.randint(1, 123, df.shape[0])
df['x2'] = ["S", "M", "S", "D", "M"]
df = df.set_index('id')
encoder = ce.OrdinalEncoder(cols=["x2"], handle_unknown="None")
encoder_fitted = encoder.fit(df)
df_encoded = encoder_fitted.transform(df)
output = df[["x1", "x2"]].copy()
output["x2"] = ["single", "married", "single", "divorced", "married"]
clf = cb.CatBoostClassifier(n_estimators=1).fit(df_encoded[['x1', 'x2']], df_encoded['y'])
postprocessing_1 = {"x2": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
postprocessing_2 = {
"family_situation": {
"type": "transcoding",
"rule": {"S": "single", "M": "married", "D": "divorced"}}}
xpl_postprocessing1 = SmartExplainer()
xpl_postprocessing2 = SmartExplainer(features_dict={"x1": "age",
"x2": "family_situation"}
)
xpl_postprocessing3 = SmartExplainer()
xpl_postprocessing1.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=encoder_fitted,
postprocessing=postprocessing_1)
xpl_postprocessing2.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=encoder_fitted,
postprocessing=postprocessing_2)
xpl_postprocessing3.compile(model=clf,
x=df_encoded[['x1', 'x2']],
preprocessing=None,
postprocessing=None)
assert hasattr(xpl_postprocessing1, "preprocessing")
assert hasattr(xpl_postprocessing1, "postprocessing")
assert hasattr(xpl_postprocessing2, "preprocessing")
assert hasattr(xpl_postprocessing2, "postprocessing")
assert hasattr(xpl_postprocessing3, "preprocessing")
assert hasattr(xpl_postprocessing3, "postprocessing")
|
pd.testing.assert_frame_equal(xpl_postprocessing1.x_pred, output)
|
pandas.testing.assert_frame_equal
|
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
        tests.append((Week(-2, weekday=1),  # n=-2, anchored on Tuesday
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(
|
BQuarterEnd(1, startingMonth=2)
|
pandas.core.datetools.BQuarterEnd
|
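For reference, the behaviour exercised by these tests can be reproduced directly; in current pandas the same offset class is exposed as pd.offsets.BQuarterEnd rather than the legacy pandas.core.datetools path used here. A minimal sketch:

import pandas as pd

offset = pd.offsets.BQuarterEnd(startingMonth=1)
# Rolls forward to the last business day of the next month in the Jan/Apr/Jul/Oct cycle,
# matching the expectations in test_offset above.
print(pd.Timestamp(2008, 2, 15) + offset)               # 2008-04-30
print(pd.Timestamp(2008, 1, 31) + offset)               # 2008-04-30
print(offset.is_on_offset(pd.Timestamp(2008, 4, 30)))   # True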
from seaman.docs.notebooks import generate_input
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import inspect
def run_function(df,function):
parameters_function = generate_input.list_parameters(function)
return function(**df[parameters_function])
def get_inputs(function,df,parameters_this):
# Needed parameters:
parameters_function = set(generate_input.list_parameters(function))
parameters_equation = set(parameters_this)
df_parameters_all = set(df.columns)
    available_parameters = df_parameters_all | parameters_equation
    missing_parameters = parameters_function - available_parameters
    #if len(missing_parameters) > 0:
    #    raise ValueError('Missing:%s' % missing_parameters)
df_parameters = (parameters_function & df_parameters_all) - parameters_equation
df_input = df[df_parameters]
return df_input
def sympy_to_shipdict_coefficient_name(sympy_name:str):
s = sympy_name.lower()
s2 = s.replace('_','')
s3 = s2.replace('delta','d')
return s3
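# Illustrative mapping (not part of the original file): lower-case, drop underscores,
# then shorten 'delta' to 'd', e.g.
#   sympy_to_shipdict_coefficient_name("Y_delta")        -> "yd"
#   sympy_to_shipdict_coefficient_name("N_delta_delta")  -> "ndd"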
class Model():
y_key = ''
boundaries = {}
@property
def parameters_this(self):
signature = inspect.signature(self._equation)
parameters_this = list(signature.parameters.keys())
parameters_this.remove('df')
return parameters_this
@property
def bounds(self):
minimums = []
maximums = []
for key in self.parameters_this:
boundaries = self.boundaries.get(key, (-np.inf, np.inf))
assert len(boundaries) == 2
minimums.append(boundaries[0])
maximums.append(boundaries[1])
return [tuple(minimums), tuple(maximums)]
def prepare_data(self, data, coefficients:dict):
df = data.copy()
for key,value in coefficients.items():
df[key]=value
df_input = get_inputs(function=self.function, df=df, parameters_this=self.parameters_this)
df_input = df_input.astype(float)
return df_input
def fit(self, data, coefficients:dict, **kwargs):
df_input = self.prepare_data(data=data, coefficients=coefficients)
        p0 = np.zeros(len(self.parameters_this))
popt, pcov = curve_fit(f=self._equation, xdata=df_input, ydata=data[self.y_key], p0=p0, bounds = self.bounds,
**kwargs)
parameter_values = list(popt)
self.parameters = dict(zip(self.parameters_this, parameter_values))
def run(self,result):
mask =
|
pd.isnull(result)
|
pandas.isnull
|
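The completed call above uses pd.isnull (an alias of pd.isna) to build an element-wise boolean mask; a typical follow-up is to drop or overwrite the missing entries. A minimal, self-contained sketch independent of the Model class:

import numpy as np
import pandas as pd

result = pd.Series([0.1, np.nan, 0.3])
mask = pd.isnull(result)   # [False, True, False]
print(result[~mask])       # keeps only the non-missing values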
"""Plots module."""
import cv2
import itertools
from multimethod import multimethod
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import plotly
import plotly.express as px
import plotly.graph_objects as go
import plotly.subplots
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.electric_grid_models
import mesmo.problems  # needed for the ResultsDict annotations used further below
import mesmo.thermal_grid_models
import mesmo.utils
if mesmo.config.config["plots"]["add_basemap"]:
# Basemap requires `contextily`, which is an optional dependency, due to needing installation through `conda`.
import contextily as ctx
logger = mesmo.config.get_logger(__name__)
class ElectricGridGraph(nx.DiGraph):
"""Electric grid graph object."""
edge_by_line_name: pd.Series
transformer_nodes: list
node_positions: dict
node_labels: dict
@multimethod
def __init__(self, scenario_name: str):
# Obtain electric grid data.
electric_grid_data = mesmo.data_interface.ElectricGridData(scenario_name)
self.__init__(electric_grid_data)
@multimethod
def __init__(self, electric_grid_data: mesmo.data_interface.ElectricGridData):
# Create electric grid graph.
super().__init__()
self.add_nodes_from(electric_grid_data.electric_grid_nodes.loc[:, "node_name"].tolist())
self.add_edges_from(
electric_grid_data.electric_grid_lines.loc[:, ["node_1_name", "node_2_name"]].itertuples(index=False)
)
# Obtain edges indexed by line name.
self.edge_by_line_name = pd.Series(
electric_grid_data.electric_grid_lines.loc[:, ["node_1_name", "node_2_name"]].itertuples(index=False),
index=electric_grid_data.electric_grid_lines.loc[:, "line_name"],
)
# Obtain transformer nodes (secondary nodes of transformers).
self.transformer_nodes = electric_grid_data.electric_grid_transformers.loc[:, "node_2_name"].tolist()
# Obtain node positions / labels.
if pd.notnull(electric_grid_data.electric_grid_nodes.loc[:, ["longitude", "latitude"]]).any().any():
self.node_positions = electric_grid_data.electric_grid_nodes.loc[:, ["longitude", "latitude"]].T.to_dict(
"list"
)
else:
# If latitude / longitude are not defined, generate node positions based on networkx layout.
self.node_positions = nx.spring_layout(self)
# Only keep positions for nodes with line connections.
for node_name in self.node_positions:
if (
node_name
not in electric_grid_data.electric_grid_lines.loc[:, ["node_1_name", "node_2_name"]].values.ravel()
):
self.node_positions[node_name] = [np.nan, np.nan]
self.node_labels = electric_grid_data.electric_grid_nodes.loc[:, "node_name"].to_dict()
class ThermalGridGraph(nx.DiGraph):
"""Thermal grid graph object."""
edge_by_line_name: pd.Series
node_positions: dict
node_labels: dict
@multimethod
def __init__(self, scenario_name: str):
# Obtain thermal grid data.
thermal_grid_data = mesmo.data_interface.ThermalGridData(scenario_name)
self.__init__(thermal_grid_data)
@multimethod
def __init__(self, thermal_grid_data: mesmo.data_interface.ThermalGridData):
# Create thermal grid graph.
super().__init__()
self.add_nodes_from(thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"].tolist())
self.add_edges_from(
thermal_grid_data.thermal_grid_lines.loc[:, ["node_1_name", "node_2_name"]].itertuples(index=False)
)
# Obtain edges indexed by line name.
self.edge_by_line_name = pd.Series(
thermal_grid_data.thermal_grid_lines.loc[:, ["node_1_name", "node_2_name"]].itertuples(index=False),
index=thermal_grid_data.thermal_grid_lines.loc[:, "line_name"],
)
# Obtain node positions / labels.
if pd.notnull(thermal_grid_data.thermal_grid_nodes.loc[:, ["longitude", "latitude"]]).any().any():
self.node_positions = thermal_grid_data.thermal_grid_nodes.loc[:, ["longitude", "latitude"]].T.to_dict(
"list"
)
else:
# If latitude / longitude are not defined, generate node positions based on networkx layout.
self.node_positions = nx.spring_layout(self)
# Only keep positions for nodes with line connections.
for node_name in self.node_positions:
if (
node_name
not in thermal_grid_data.thermal_grid_lines.loc[:, ["node_1_name", "node_2_name"]].values.ravel()
):
self.node_positions[node_name] = [np.nan, np.nan]
self.node_labels = thermal_grid_data.thermal_grid_nodes.loc[:, "node_name"].to_dict()
def create_video(name: str, labels: pd.Index, results_path: str):
# Obtain images / frames based on given name / labels.
images = []
for label in labels:
if type(label) is pd.Timestamp:
filename = f"{name}_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
images.append(cv2.imread(os.path.join(results_path, filename)))
if len(images) == 0:
raise FileNotFoundError(
f"No images / frames found for video of '{name}'. Check if given labels are valid timesteps."
)
# Setup video.
video_writer = cv2.VideoWriter(
os.path.join(results_path, f"{name}.avi"), # Filename.
cv2.VideoWriter_fourcc(*"XVID"), # Format.
2.0, # FPS.
images[0].shape[1::-1], # Size.
)
# Write frames to video.
for image in images:
video_writer.write(image)
# Cleanup.
video_writer.release()
cv2.destroyAllWindows()
@multimethod
def plot_line_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.DataFrame,
results_path: str,
vmin=None,
vmax=None,
make_video=False,
**kwargs,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Create plot for each column in `value_vector`.
mesmo.utils.starmap(
wrapper_plot_line_utilization,
zip(
itertools.repeat(grid_model),
itertools.repeat(grid_graph),
[row[1] for row in value_vector.iterrows()],
itertools.repeat(results_path),
),
dict(vmin=vmin, vmax=vmax, **kwargs),
)
# Stitch images to video.
if make_video:
create_video(name="line_utilization", labels=value_vector.index, results_path=results_path)
@multimethod
def plot_line_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.Series,
results_path: str,
vmin=None,
vmax=None,
label=None,
value_unit="W",
horizontal_line_value=None,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
if horizontal_line_value is not None:
vmin = min(vmin, 1.05 * horizontal_line_value, 0.95 * horizontal_line_value)
vmax = max(vmax, 1.05 * horizontal_line_value, 0.95 * horizontal_line_value)
# Obtain values for plotting.
if isinstance(grid_graph, ElectricGridGraph):
# Take only lines & mean across all phases.
values = value_vector.loc[grid_model.lines].mean(level="branch_name")
else:
values = value_vector
# Obtain label.
label = value_vector.name if label is None else label
# Obtain plot title / filename.
if label is not None:
title = f"Line utilization: {label.strftime('%H:%M:%S') if type(label) is pd.Timestamp else label}"
filename = f"line_utilization_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
else:
title = f"Line utilization"
filename = "line_utilization.png"
y_label = f"Utilization [{value_unit}]"
# Create plot.
plt.figure()
plt.title(title)
plt.bar(range(len(grid_model.line_names)), values)
if horizontal_line_value is not None:
plt.hlines(horizontal_line_value, -0.5, len(grid_model.line_names) - 0.5, colors="red")
plt.xticks(range(len(grid_model.line_names)), grid_model.line_names, rotation=45, ha="right")
plt.ylim([vmin, vmax])
plt.ylabel(y_label)
plt.tight_layout()
plt.savefig(os.path.join(results_path, filename))
# plt.show()
plt.close()
def wrapper_plot_line_utilization(*args, **kwargs):
plot_line_utilization(*args, **kwargs)
@multimethod
def plot_transformer_utilization(
grid_model: mesmo.electric_grid_models.ElectricGridModel,
grid_graph: ElectricGridGraph,
value_vector: pd.DataFrame,
results_path: str,
vmin=None,
vmax=None,
make_video=False,
**kwargs,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Create plot for each column in `value_vector`.
mesmo.utils.starmap(
wrapper_plot_transformer_utilization,
zip(
itertools.repeat(grid_model),
itertools.repeat(grid_graph),
[row[1] for row in value_vector.iterrows()],
itertools.repeat(results_path),
),
dict(vmin=vmin, vmax=vmax, **kwargs),
)
# Stitch images to video.
if make_video:
create_video(name="transformer_utilization", labels=value_vector.index, results_path=results_path)
@multimethod
def plot_transformer_utilization(
grid_model: mesmo.electric_grid_models.ElectricGridModel,
grid_graph: ElectricGridGraph,
value_vector: pd.Series,
results_path: str,
vmin=None,
vmax=None,
label=None,
value_unit="W",
horizontal_line_value=None,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
if horizontal_line_value is not None:
vmin = min(vmin, 1.05 * horizontal_line_value, 0.95 * horizontal_line_value)
vmax = max(vmax, 1.05 * horizontal_line_value, 0.95 * horizontal_line_value)
# Obtain values for plotting.
if isinstance(grid_graph, ElectricGridGraph):
# Take only transformers & mean across all phases.
values = value_vector.loc[grid_model.transformers].mean(level="branch_name")
else:
values = value_vector
# Obtain label.
label = value_vector.name if label is None else label
# Obtain plot title / filename.
if label is not None:
title = f"Transformer utilization: {label.strftime('%H:%M:%S') if type(label) is pd.Timestamp else label}"
filename = f"transformer_utilization_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
else:
title = f"Transformer utilization"
filename = "transformer_utilization.png"
y_label = f"Utilization [{value_unit}]"
# Create plot.
plt.figure()
plt.title(title)
plt.bar(range(len(grid_model.transformer_names)), values)
if horizontal_line_value is not None:
plt.hlines(horizontal_line_value, -0.5, len(grid_model.transformer_names) - 0.5, colors="red")
plt.xticks(range(len(grid_model.transformer_names)), grid_model.transformer_names, rotation=45, ha="right")
plt.ylim([vmin, vmax])
plt.ylabel(y_label)
plt.tight_layout()
plt.savefig(os.path.join(results_path, filename))
# plt.show()
plt.close()
def wrapper_plot_transformer_utilization(*args, **kwargs):
plot_transformer_utilization(*args, **kwargs)
@multimethod
def plot_node_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.DataFrame,
results_path: str,
vmin=None,
vmax=None,
make_video=False,
**kwargs,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Create plot for each column in `value_vector`.
mesmo.utils.starmap(
wrapper_plot_node_utilization,
zip(
itertools.repeat(grid_model),
itertools.repeat(grid_graph),
[row[1] for row in value_vector.iterrows()],
itertools.repeat(results_path),
),
dict(vmin=vmin, vmax=vmax, **kwargs),
)
# Stitch images to video.
if make_video:
create_video(
name="node_voltage" if isinstance(grid_graph, ElectricGridGraph) else "node_head",
labels=value_vector.index,
results_path=results_path,
)
@multimethod
def plot_node_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.Series,
results_path: str,
vmin=None,
vmax=None,
label=None,
value_unit=None,
suffix=None,
horizontal_line_value=None,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
if horizontal_line_value is not None:
vmin = min(vmin, 1.05 * horizontal_line_value, 0.95 * horizontal_line_value)
vmax = max(vmax, 1.05 * horizontal_line_value, 0.95 * horizontal_line_value)
# Obtain values for plotting.
if isinstance(grid_graph, ElectricGridGraph):
# Take mean across all phases.
values = value_vector.mean(level="node_name")
else:
values = value_vector
# Obtain label.
label = value_vector.name if label is None else label
# Obtain plot title / filename / unit.
    if isinstance(grid_graph, ElectricGridGraph):
        # Parenthesize the conditional so the suffix is only appended when given
        # (without parentheses, a missing suffix would blank out the whole string).
        title = "Node voltage" + (f" {suffix}" if suffix is not None else "")
        filename = "node_voltage"
        y_label = "Voltage" + (f" {suffix}" if suffix is not None else "")
        value_unit = "V" if value_unit is None else value_unit
    else:
        title = "Node head" + (f" {suffix}" if suffix is not None else "")
        filename = "node_head"
        y_label = "Head" + (f" {suffix}" if suffix is not None else "")
        value_unit = "m" if value_unit is None else value_unit
if label is not None:
title = f"{title}: {label.strftime('%H:%M:%S') if type(label) is pd.Timestamp else label}"
filename = f"{filename}_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
else:
title = f"{title}"
filename = f"{filename}.png"
# Create plot.
plt.figure()
plt.title(title)
plt.bar(range(len(grid_model.node_names)), values)
if horizontal_line_value is not None:
plt.hlines(horizontal_line_value, -0.5, len(grid_model.node_names) - 0.5, colors="red")
plt.xticks(range(len(grid_model.node_names)), grid_model.node_names, rotation=45, ha="right")
plt.ylim([vmin, vmax])
plt.ylabel(f"{y_label} [{value_unit}]")
plt.tight_layout()
plt.savefig(os.path.join(results_path, filename))
# plt.show()
plt.close()
def wrapper_plot_node_utilization(*args, **kwargs):
plot_node_utilization(*args, **kwargs)
@multimethod
def plot_grid_line_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.DataFrame,
results_path: str,
vmin=None,
vmax=None,
make_video=False,
**kwargs,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Create plot for each column in `value_vector`.
mesmo.utils.starmap(
wrapper_plot_grid_line_utilization,
zip(
itertools.repeat(grid_model),
itertools.repeat(grid_graph),
[row[1] for row in value_vector.iterrows()],
itertools.repeat(results_path),
),
dict(vmin=vmin, vmax=vmax, **kwargs),
)
# Stitch images to video.
if make_video:
create_video(name="grid_line_utilization", labels=value_vector.index, results_path=results_path)
@multimethod
def plot_grid_line_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.Series,
results_path: str,
vmin=None,
vmax=None,
label=None,
value_unit="W",
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Obtain edge color values.
if isinstance(grid_graph, ElectricGridGraph):
# Take only lines & mean across all phases.
values = value_vector.loc[grid_model.lines].mean(level="branch_name")
else:
values = value_vector
# Obtain label.
label = value_vector.name if label is None else label
# Obtain plot title / filename.
if label is not None:
title = f"Line utilization: {label.strftime('%H:%M:%S') if type(label) is pd.Timestamp else label}"
filename = f"grid_line_utilization_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
else:
title = "Line utilization"
filename = "grid_line_utilization.png"
# Create plot.
plt.figure()
plt.title(title)
if isinstance(grid_graph, ElectricGridGraph):
# Highlight transformer nodes.
nx.draw(
grid_graph,
nodelist=grid_graph.transformer_nodes,
edgelist=[],
pos=grid_graph.node_positions,
node_size=100.0,
node_color="red",
)
nx.draw(
grid_graph,
edgelist=grid_graph.edge_by_line_name.loc[grid_model.line_names].tolist(),
pos=grid_graph.node_positions,
node_size=10.0,
node_color="black",
arrows=False,
width=5.0,
edge_vmin=vmin,
edge_vmax=vmax,
edge_color=values.tolist(),
)
# Add colorbar.
sm = plt.cm.ScalarMappable(norm=plt.Normalize(vmin=vmin, vmax=vmax))
cb = plt.colorbar(sm, shrink=0.9)
cb.set_label(f"Utilization [{value_unit}]")
if mesmo.config.config["plots"]["add_basemap"]:
# Adjust axis limits, to get a better view of surrounding map.
xlim = plt.xlim()
xlim = (xlim[0] - 0.05 * (xlim[1] - xlim[0]), xlim[1] + 0.05 * (xlim[1] - xlim[0]))
plt.xlim(xlim)
ylim = plt.ylim()
ylim = (ylim[0] - 0.05 * (ylim[1] - ylim[0]), ylim[1] + 0.05 * (ylim[1] - ylim[0]))
plt.ylim(ylim)
# Add contextual basemap layer for orientation.
ctx.add_basemap(
plt.gca(),
crs="EPSG:4326", # Use 'EPSG:4326' for latitude / longitude coordinates.
source=ctx.providers.CartoDB.Positron,
attribution=mesmo.config.config["plots"]["show_basemap_attribution"],
)
# Store / show / close figure.
plt.savefig(os.path.join(results_path, filename), bbox_inches="tight")
# plt.show()
plt.close()
def wrapper_plot_grid_line_utilization(*args, **kwargs):
plot_grid_line_utilization(*args, **kwargs)
@multimethod
def plot_grid_transformer_utilization(
grid_model: mesmo.electric_grid_models.ElectricGridModel,
grid_graph: ElectricGridGraph,
value_vector: pd.DataFrame,
results_path: str,
vmin=None,
vmax=None,
make_video=False,
**kwargs,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Create plot for each column in `value_vector`.
mesmo.utils.starmap(
wrapper_plot_grid_transformer_utilization,
zip(
itertools.repeat(grid_model),
itertools.repeat(grid_graph),
[row[1] for row in value_vector.iterrows()],
itertools.repeat(results_path),
),
dict(vmin=vmin, vmax=vmax, **kwargs),
)
# Stitch images to video.
if make_video:
create_video(name="grid_transformer_utilization", labels=value_vector.index, results_path=results_path)
@multimethod
def plot_grid_transformer_utilization(
grid_model: mesmo.electric_grid_models.ElectricGridModel,
grid_graph: ElectricGridGraph,
value_vector: pd.Series,
results_path: str,
vmin=None,
vmax=None,
label=None,
value_unit="W",
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Obtain edge color values.
# - Take only transformers & mean across all phases.
values = value_vector.loc[grid_model.transformers].mean(level="branch_name")
# Obtain label.
label = value_vector.name if label is None else label
# Obtain plot title / filename.
if label is not None:
title = f"Transformer utilization: {label.strftime('%H:%M:%S') if type(label) is pd.Timestamp else label}"
filename = f"grid_transformer_utilization_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
else:
title = "Transformer utilization"
filename = "grid_transformer_utilization.png"
# Create plot.
plt.figure()
plt.title(title)
    # Plot all nodes with node size 0.0, just to establish the full map extent.
nx.draw(grid_graph, edgelist=[], pos=grid_graph.node_positions, node_size=0.0)
nx.draw(
grid_graph,
nodelist=grid_graph.transformer_nodes,
edgelist=[],
pos=grid_graph.node_positions,
node_size=200.0,
node_color=values.tolist(),
vmin=vmin,
vmax=vmax,
edgecolors="black",
)
# Add colorbar.
sm = plt.cm.ScalarMappable(norm=plt.Normalize(vmin=vmin, vmax=vmax))
cb = plt.colorbar(sm, shrink=0.9)
cb.set_label(f"Utilization [{value_unit}]")
if mesmo.config.config["plots"]["add_basemap"]:
# Adjust axis limits, to get a better view of surrounding map.
xlim = plt.xlim()
xlim = (xlim[0] - 0.05 * (xlim[1] - xlim[0]), xlim[1] + 0.05 * (xlim[1] - xlim[0]))
plt.xlim(xlim)
ylim = plt.ylim()
ylim = (ylim[0] - 0.05 * (ylim[1] - ylim[0]), ylim[1] + 0.05 * (ylim[1] - ylim[0]))
plt.ylim(ylim)
# Add contextual basemap layer for orientation.
ctx.add_basemap(
plt.gca(),
crs="EPSG:4326", # Use 'EPSG:4326' for latitude / longitude coordinates.
source=ctx.providers.CartoDB.Positron,
attribution=mesmo.config.config["plots"]["show_basemap_attribution"],
)
# Store / show / close figure.
plt.savefig(os.path.join(results_path, filename), bbox_inches="tight")
# plt.show()
plt.close()
def wrapper_plot_grid_transformer_utilization(*args, **kwargs):
plot_grid_transformer_utilization(*args, **kwargs)
@multimethod
def plot_grid_node_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.DataFrame,
results_path: str,
vmin=None,
vmax=None,
make_video=False,
**kwargs,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Create plot for each column in `value_vector`.
mesmo.utils.starmap(
wrapper_plot_grid_node_utilization,
zip(
itertools.repeat(grid_model),
itertools.repeat(grid_graph),
[row[1] for row in value_vector.iterrows()],
itertools.repeat(results_path),
),
dict(vmin=vmin, vmax=vmax, **kwargs),
)
# Stitch images to video.
if make_video:
create_video(
name="grid_node_voltage" if isinstance(grid_graph, ElectricGridGraph) else "grid_node_head",
labels=value_vector.index,
results_path=results_path,
)
@multimethod
def plot_grid_node_utilization(
grid_model: typing.Union[mesmo.electric_grid_models.ElectricGridModel, mesmo.thermal_grid_models.ThermalGridModel],
grid_graph: typing.Union[ElectricGridGraph, ThermalGridGraph],
value_vector: pd.Series,
results_path: str,
vmin=None,
vmax=None,
label=None,
value_unit=None,
suffix=None,
):
# Obtain colorscale minimum / maximum value.
vmin = value_vector.values.ravel().min() if vmin is None else vmin
vmax = value_vector.values.ravel().max() if vmax is None else vmax
# Obtain edge color values.
if isinstance(grid_graph, ElectricGridGraph):
# Take mean across all phases.
values = value_vector.mean(level="node_name")
else:
values = value_vector
# Obtain label.
label = value_vector.name if label is None else label
# Obtain plot title / filename / unit.
    if isinstance(grid_graph, ElectricGridGraph):
        # Parenthesize the conditional so the suffix is only appended when given
        # (without parentheses, a missing suffix would blank out the whole string).
        title = "Node voltage" + (f" {suffix}" if suffix is not None else "")
        filename = "grid_node_voltage"
        colorbar_label = "Voltage" + (f" {suffix}" if suffix is not None else "")
        value_unit = "V" if value_unit is None else value_unit
    else:
        title = "Node head" + (f" {suffix}" if suffix is not None else "")
        filename = "grid_node_head"
        colorbar_label = "Head" + (f" {suffix}" if suffix is not None else "")
        value_unit = "m" if value_unit is None else value_unit
if label is not None:
title = f"{title}: {label.strftime('%H:%M:%S') if type(label) is pd.Timestamp else label}"
filename = f"{filename}_{mesmo.utils.get_alphanumeric_string(f'{label}')}.png"
else:
title = f"{title}"
filename = f"{filename}.png"
# Create plot.
plt.figure()
plt.title(title)
if isinstance(grid_graph, ElectricGridGraph):
# Highlight transformer nodes.
nx.draw(
grid_graph,
nodelist=grid_graph.transformer_nodes,
edgelist=[],
pos=grid_graph.node_positions,
node_size=100.0,
node_color="red",
)
nx.draw(
grid_graph,
nodelist=grid_model.node_names.tolist(),
pos=grid_graph.node_positions,
node_size=50.0,
arrows=False,
vmin=vmin,
vmax=vmax,
node_color=values.tolist(),
edgecolors="black",
)
# Add colorbar.
sm = plt.cm.ScalarMappable(norm=plt.Normalize(vmin=vmin, vmax=vmax))
cb = plt.colorbar(sm, shrink=0.9)
cb.set_label(f"{colorbar_label} [{value_unit}]")
if mesmo.config.config["plots"]["add_basemap"]:
# Adjust axis limits, to get a better view of surrounding map.
xlim = plt.xlim()
xlim = (xlim[0] - 0.05 * (xlim[1] - xlim[0]), xlim[1] + 0.05 * (xlim[1] - xlim[0]))
plt.xlim(xlim)
ylim = plt.ylim()
ylim = (ylim[0] - 0.05 * (ylim[1] - ylim[0]), ylim[1] + 0.05 * (ylim[1] - ylim[0]))
plt.ylim(ylim)
# Add contextual basemap layer for orientation.
ctx.add_basemap(
plt.gca(),
crs="EPSG:4326", # Use 'EPSG:4326' for latitude / longitude coordinates.
source=ctx.providers.CartoDB.Positron,
attribution=mesmo.config.config["plots"]["show_basemap_attribution"],
)
# Store / show / close figure.
plt.savefig(os.path.join(results_path, filename), bbox_inches="tight")
# plt.show()
plt.close()
def wrapper_plot_grid_node_utilization(*args, **kwargs):
plot_grid_node_utilization(*args, **kwargs)
def plot_total_active_power(values_dict: dict, results_path: str):
# Pre-process values.
for key in values_dict:
values_dict[key] = values_dict[key].sum(axis="columns") / 1e6
values_dict[key].loc[:] = np.abs(np.real(values_dict[key]))
# Obtain plot title / labels / filename.
title = "Total active power"
filename = "total_active_power_timeseries"
y_label = "Active power"
value_unit = "MW"
# Create plot.
figure = go.Figure()
for key in values_dict:
figure.add_trace(
go.Scatter(
x=values_dict[key].index,
y=values_dict[key].values,
name=key,
fill="tozeroy",
line=go.scatter.Line(shape="hv"),
)
)
figure.update_layout(
title=title,
yaxis_title=f"{y_label} [{value_unit}]",
xaxis=go.layout.XAxis(tickformat="%H:%M"),
legend=go.layout.Legend(x=0.99, xanchor="auto", y=0.01, yanchor="auto"),
)
# figure.show()
figure.write_image(os.path.join(results_path, filename + f".{mesmo.config.config['plots']['file_format']}"))
def plot_line_utilization_histogram(
values_dict: dict, results_path: str, histogram_minimum=0.0, histogram_maximum=1.0, histogram_bin_count=100
):
# Obtain histogram bins.
histogram_interval = (histogram_maximum - histogram_minimum) / histogram_bin_count
histogram_bins = np.arange(histogram_minimum, histogram_maximum + histogram_interval, histogram_interval)
# Pre-process values.
for key in values_dict:
# Obtain maximum utilization for all lines.
values_dict[key] = (
values_dict[key].loc[:, values_dict[key].columns.get_level_values("branch_type") == "line"].max()
)
# Set over-utilized lines to 1 p.u. for better visualization.
values_dict[key].loc[values_dict[key] > histogram_maximum] = histogram_maximum
# Obtain histogram values.
values_dict[key] = pd.Series(
[*np.histogram(values_dict[key], bins=histogram_bins)[0], 0], index=histogram_bins
) / len(values_dict[key])
# Obtain plot title / labels / filename.
title = "Lines"
filename = "line_utilization_histogram"
y_label = "Peak utilization"
value_unit = "p.u."
# Create plot.
figure = go.Figure()
for key in values_dict:
figure.add_trace(go.Bar(x=values_dict[key].index, y=values_dict[key].values, name=key))
figure.update_layout(
title=title,
xaxis_title=f"{y_label} [{value_unit}]",
yaxis_title="Frequency",
legend=go.layout.Legend(x=0.99, xanchor="auto", y=0.99, yanchor="auto"),
)
# figure.show()
figure.write_image(os.path.join(results_path, filename + f".{mesmo.config.config['plots']['file_format']}"))
def plot_line_utilization_histogram_cumulative(
values_dict: dict, results_path: str, histogram_minimum=0.0, histogram_maximum=1.0, histogram_bin_count=100
):
# Obtain histogram bins.
histogram_interval = (histogram_maximum - histogram_minimum) / histogram_bin_count
histogram_bins = np.arange(histogram_minimum, histogram_maximum + histogram_interval, histogram_interval)
# Pre-process values.
for key in values_dict:
# Obtain maximum utilization for all lines.
values_dict[key] = (
values_dict[key].loc[:, values_dict[key].columns.get_level_values("branch_type") == "line"].max()
)
# Set over-utilized lines to 1 p.u. for better visualization.
values_dict[key].loc[values_dict[key] > histogram_maximum] = histogram_maximum
# Obtain cumulative histogram values.
values_dict[key] = pd.Series(
[*np.histogram(values_dict[key], bins=histogram_bins)[0], 0], index=histogram_bins
).cumsum() / len(values_dict[key])
# Obtain plot title / labels / filename.
title = "Lines"
filename = "line_utilization_histogram_cumulative"
y_label = "Peak utilization"
value_unit = "p.u."
# Create plot.
figure = go.Figure()
for key in values_dict:
figure.add_trace(
go.Scatter(x=values_dict[key].index, y=values_dict[key].values, name=key, line=go.scatter.Line(shape="hv"))
)
# Add horizontal line at 90%.
figure.add_shape(
go.layout.Shape(
x0=0, x1=1, xref="paper", y0=0.9, y1=0.9, yref="y", type="line", line=go.layout.shape.Line(width=2)
)
)
figure.update_layout(
title=title,
xaxis_title=f"{y_label} [{value_unit}]",
yaxis_title="Cumulative proportion",
legend=go.layout.Legend(x=0.99, xanchor="auto", y=0.01, yanchor="auto"),
)
# figure.show()
figure.write_image(os.path.join(results_path, filename + f".{mesmo.config.config['plots']['file_format']}"))
def plot_transformer_utilization_histogram(
values_dict: dict,
results_path: str,
selected_columns=None,
histogram_minimum=0.0,
histogram_maximum=1.0,
histogram_bin_count=100,
):
# Obtain histogram bins.
histogram_interval = (histogram_maximum - histogram_minimum) / histogram_bin_count
histogram_bins = np.arange(histogram_minimum, histogram_maximum + histogram_interval, histogram_interval)
# Pre-process values.
for key in values_dict:
# Only use selected columns.
values_dict[key] = (
values_dict[key].loc[:, selected_columns] if selected_columns is not None else values_dict[key]
)
# Obtain maximum utilization for all transformers.
values_dict[key] = (
values_dict[key].loc[:, values_dict[key].columns.get_level_values("branch_type") == "transformer"].max()
)
# Set over-utilized transformers to 1 p.u. for better visualization.
values_dict[key].loc[values_dict[key] > histogram_maximum] = histogram_maximum
# Obtain histogram values.
values_dict[key] = pd.Series(
[*np.histogram(values_dict[key], bins=histogram_bins)[0], 0], index=histogram_bins
) / len(values_dict[key])
# Obtain plot title / labels / filename.
title = "1MVA Transformers"
filename = "transformer_utilization_histogram"
y_label = "Peak utilization"
value_unit = "p.u."
# Create plot.
figure = go.Figure()
for key in values_dict:
figure.add_trace(go.Bar(x=values_dict[key].index, y=values_dict[key].values, name=key))
figure.update_layout(
title=title,
xaxis_title=f"{y_label} [{value_unit}]",
yaxis_title="Frequency",
legend=go.layout.Legend(x=0.99, xanchor="auto", y=0.99, yanchor="auto"),
)
# figure.show()
figure.write_image(os.path.join(results_path, filename + f".{mesmo.config.config['plots']['file_format']}"))
def plot_transformer_utilization_histogram_cumulative(
values_dict: dict,
results_path: str,
selected_columns=None,
histogram_minimum=0.0,
histogram_maximum=1.0,
histogram_bin_count=100,
):
# Obtain histogram bins.
histogram_interval = (histogram_maximum - histogram_minimum) / histogram_bin_count
histogram_bins = np.arange(histogram_minimum, histogram_maximum + histogram_interval, histogram_interval)
# Pre-process values.
for key in values_dict:
# Only use selected columns.
values_dict[key] = (
values_dict[key].loc[:, selected_columns] if selected_columns is not None else values_dict[key]
)
# Obtain maximum utilization for all transformers.
values_dict[key] = (
values_dict[key].loc[:, values_dict[key].columns.get_level_values("branch_type") == "transformer"].max()
)
# Set over-utilized transformers to 1 p.u. for better visualization.
values_dict[key].loc[values_dict[key] > histogram_maximum] = histogram_maximum
# Obtain histogram values.
values_dict[key] = pd.Series(
[*np.histogram(values_dict[key], bins=histogram_bins)[0], 0], index=histogram_bins
).cumsum() / len(values_dict[key])
# Obtain plot title / labels / filename.
title = "1MVA Transformers"
filename = "transformer_utilization_histogram_cumulative"
y_label = "Peak utilization"
value_unit = "p.u."
# Create plot.
figure = go.Figure()
for key in values_dict:
figure.add_trace(
go.Scatter(x=values_dict[key].index, y=values_dict[key].values, name=key, line=go.scatter.Line(shape="hv"))
)
# Add horizontal line at 90%.
figure.add_shape(
go.layout.Shape(
x0=0, x1=1, xref="paper", y0=0.9, y1=0.9, yref="y", type="line", line=go.layout.shape.Line(width=2)
)
)
figure.update_layout(
title=title,
xaxis_title=f"{y_label} [{value_unit}]",
yaxis_title="Cumulative proportion",
legend=go.layout.Legend(x=0.99, xanchor="auto", y=0.01, yanchor="auto"),
)
# figure.show()
figure.write_image(os.path.join(results_path, filename + f".{mesmo.config.config['plots']['file_format']}"))
def plot_histogram_cumulative_branch_utilization(
results_dict: mesmo.problems.ResultsDict,
results_path: str,
branch_type: str = "line",
filename_base: str = "branch_utilization_",
filename_suffix: str = "",
plot_title: str = None,
histogram_minimum: float = 0.0,
histogram_maximum: float = 1.0,
histogram_bin_count: int = 100,
vertical_line: float = None,
horizontal_line: float = None,
x_tick_interval: float = 0.1,
):
# Obtain histogram bins.
histogram_interval = (histogram_maximum - histogram_minimum) / histogram_bin_count
histogram_bins = np.arange(histogram_minimum, histogram_maximum + histogram_interval, histogram_interval)
# Pre-process values.
values_dict = dict.fromkeys(results_dict.keys())
box_values_dict = dict.fromkeys(results_dict.keys())
for key in values_dict:
# Obtain branch power in p.u. values.
values_dict[key] = (
results_dict[key].branch_power_magnitude_vector_1_per_unit
+ results_dict[key].branch_power_magnitude_vector_2_per_unit
) / 2
# Select branch type.
values_dict[key] = values_dict[key].loc[
:, values_dict[key].columns.get_level_values("branch_type") == branch_type
]
# Obtain maximum utilization.
values_dict[key] = values_dict[key].max()
# Keep these values for boxplot.
box_values_dict[key] = values_dict[key]
# Obtain cumulative histogram values.
values_dict[key] = pd.Series(
[*np.histogram(values_dict[key], bins=histogram_bins)[0], 0], index=histogram_bins
).cumsum() / len(values_dict[key])
# Obtain plot title / labels / filename.
title = None
filename = f"{filename_base}{branch_type}{filename_suffix}"
value_label = f"Maximum {branch_type} utilization"
value_unit = "p.u."
# Create plot.
figure = plotly.subplots.make_subplots(
rows=2, cols=1, row_heights=[0.2, 0.8], vertical_spacing=0.0, shared_xaxes=True
)
for key in box_values_dict:
figure.add_trace(go.Box(x=box_values_dict[key].values, boxmean=True, name=key, showlegend=False), row=1, col=1)
for index, key in enumerate(values_dict):
figure.add_trace(
go.Scatter(
x=values_dict[key].index,
y=values_dict[key].values,
name=key,
line=go.scatter.Line(shape="hv", color=plotly.colors.qualitative.D3[index]),
),
row=2,
col=1,
)
# Add vertical line.
if vertical_line is not None:
figure.add_shape(
go.layout.Shape(
x0=vertical_line,
x1=vertical_line,
xref="x2",
y0=0.0,
y1=1.0,
yref="paper",
type="line",
line=go.layout.shape.Line(width=2),
)
)
for trace in figure.data:
if type(trace) is go.Scatter:
key = trace["name"]
value = np.interp(vertical_line, values_dict[key].index, values_dict[key].values)
figure.add_shape(
go.layout.Shape(
x0=histogram_minimum,
x1=vertical_line,
xref="x2",
y0=value,
y1=value,
yref="y2",
type="line",
line=go.layout.shape.Line(width=2, color=trace["line"]["color"]),
layer="below",
)
)
# Add horizontal line.
if horizontal_line is not None:
figure.add_shape(
go.layout.Shape(
x0=0.0,
x1=1.0,
xref="paper",
y0=horizontal_line,
y1=horizontal_line,
yref="y2",
type="line",
line=go.layout.shape.Line(width=2),
)
)
for trace in figure.data:
if type(trace) is go.Scatter:
key = trace["name"]
value = np.interp(horizontal_line, values_dict[key].values, values_dict[key].index)
figure.add_shape(
go.layout.Shape(
x0=value,
x1=value,
xref="x2",
y0=0.0,
y1=horizontal_line,
yref="y2",
type="line",
line=go.layout.shape.Line(width=2, color=trace["line"]["color"]),
layer="below",
)
)
figure.update_layout(
title=title,
yaxis1_showticklabels=False,
xaxis1_side="top",
xaxis1_dtick=x_tick_interval,
xaxis1_showticklabels=True,
xaxis2_range=[histogram_minimum, histogram_maximum],
xaxis2_dtick=x_tick_interval,
xaxis2_title=f"{value_label} [{value_unit}]",
yaxis2_dtick=0.1,
yaxis2_title="Cumulative proportion",
legend=go.layout.Legend(x=0.99, xanchor="auto", y=0.05, yanchor="auto"),
)
mesmo.utils.write_figure_plotly(figure, os.path.join(results_path, filename))
def plot_histogram_node_utilization(
results_dict: mesmo.problems.ResultsDict,
results_path: str,
filename_base: str = "node_utilization",
filename_suffix: str = "",
plot_title: str = None,
histogram_bin_count: int = 30,
x_tick_interval: float = None,
):
# Pre-process values.
values_dict = dict.fromkeys(results_dict.keys())
box_values_dict = dict.fromkeys(results_dict.keys())
for key in values_dict:
# Obtain node voltage in p.u. values.
values_dict[key] = results_dict[key].node_voltage_magnitude_vector_per_unit
# Obtain maximum voltage drop.
values_dict[key] = 1.0 - values_dict[key].min()
# Keep these values for boxplot.
box_values_dict[key] = values_dict[key]
# Obtain histogram bins.
histogram_maximum =
|
pd.DataFrame(values_dict)
|
pandas.DataFrame
|
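The completed call wraps the dict of per-scenario Series in a DataFrame so a single histogram maximum can be taken across all scenarios; the chained reduction below is an assumption for illustration, not the original continuation. A minimal sketch with hypothetical data:

import pandas as pd

values_dict = {
    "baseline": pd.Series([0.02, 0.05, 0.04]),
    "peak": pd.Series([0.03, 0.08, 0.06]),
}
# Columns are the scenario keys, aligned on the shared index.
histogram_maximum = pd.DataFrame(values_dict).max().max()  # assumed reduction
print(histogram_maximum)  # 0.08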
#!/usr/bin/env python
def baseline_model(input_dim=None, name=None,numclasses=None, neurons=16, layer=2, optimizer='adam', dropout=0):
    # Build a fresh Sequential model for this fit.
model = Sequential(name=name)
model.add(Dense(neurons, input_dim=input_dim, activation='relu', name="storm_and_env_features"))
for i in range(layer-1):
model.add(Dropout(rate=dropout))
model.add(Dense(neurons, activation='relu'))
model.add(Dropout(rate=dropout))
model.add(Dense(numclasses, activation='sigmoid')) # used softmax in HWT_mode to add to 1
# Compile model with optimizer and loss function. MSE is same as brier_score.
loss="binary_crossentropy" # in HWT_mode, I used categorical_crossentropy
model.compile(loss=loss, optimizer=optimizer, metrics=[MeanSquaredError(), brier_skill_score, AUC(), "accuracy"])
return model
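# Illustrative call (not in the original script): a 2-hidden-layer, 16-neuron binary
# classifier for 30 input features. The Keras imports it relies on appear further below.
#   model = baseline_model(input_dim=30, name="demo", numclasses=1, neurons=16, layer=2, dropout=0.1)
#   model.summary()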
import argparse
import datetime
import G211
import glob
from hwtmode.data import decompose_circular_feature
import logging
import matplotlib.pyplot as plt
from ml_functions import brier_skill_score, rptdist2bool, get_glm
import numpy as np
import os
import pandas as pd
import pdb
import pickle
from sklearn.model_selection import train_test_split
import tensorflow.keras.backend
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.metrics import MeanSquaredError, AUC
from tensorflow.keras.models import Sequential, load_model
import sys
import time
import xarray
import yaml
def f0i(i):
return f"f{i:02d}"
def make_fhr_str(fhr):
fhr.sort()
seq = []
final = []
last = 0
for index, val in enumerate(fhr):
if last + 1 == val or index == 0:
seq.append(val)
last = val
else:
if len(seq) > 1:
final.append(f0i(seq[0]) + '-' + f0i(seq[len(seq)-1]))
else:
final.append(f0i(seq[0]))
seq = []
seq.append(val)
last = val
if index == len(fhr) - 1:
if len(seq) > 1:
final.append(f0i(seq[0]) + '-' + f0i(seq[len(seq)-1]))
else:
final.append(f0i(seq[0]))
final_str = '.'.join(map(str, final))
return final_str
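# Illustrative behaviour (not in the original script): contiguous forecast hours collapse
# into a hyphenated range and a gap starts a new group, e.g.
#   make_fhr_str([1, 2, 3, 7]) -> "f01-f03.f07"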
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
def main():
import pandas as pd # started getting UnboundLocalError: local variable 'pd' referenced before assignment Mar 1 2022 even though I import pandas above
# =============Arguments===================
parser = argparse.ArgumentParser(description = "train neural network",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batchsize', type=int, default=512, help="nn training batch size") # tf default is 32
parser.add_argument("--clobber", action='store_true', help="overwrite any old outfile, if it exists")
parser.add_argument("-d", "--debug", action='store_true')
parser.add_argument("--dropouts", type=float, nargs="+", default=[0.0], help='fraction of neurons to drop in each hidden layer (0-1)')
parser.add_argument('--fhr', nargs="+", type=int, default=list(range(1,49)), help="forecast hour")
parser.add_argument('--fits', nargs="+", type=int, default=None, help="work on specific fit(s) so you can run many in parallel")
parser.add_argument('--nfits', type=int, default=10, help="number of times to fit (train) model")
parser.add_argument('--epochs', default=30, type=int, help="number of training epochs")
parser.add_argument('--flash', type=int, default=10, help="GLM flash threshold")
parser.add_argument('--layers', default=2, type=int, help="number of hidden layers")
parser.add_argument('--model', type=str, choices=["HRRR","NSC3km-12sec"], default="HRRR", help="prediction model")
parser.add_argument("--noglm", action='store_true', help='Do not use GLM')
parser.add_argument('--savedmodel', type=str, help="filename of machine learning model")
parser.add_argument('--neurons', type=int, nargs="+", default=[16], help="number of neurons in each nn layer")
parser.add_argument('--rptdist', type=int, default=40, help="severe weather report max distance")
parser.add_argument('--splittime', type=lambda s: pd.to_datetime(s), default="202012021200", help="train with storms before this time; test this time and after")
parser.add_argument('--suite', type=str, default='sobash', help="name for suite of training features")
parser.add_argument('--twin', type=int, default=2, help="time window in hours")
# Assign arguments to simple-named variables
args = parser.parse_args()
batchsize = args.batchsize
clobber = args.clobber
debug = args.debug
dropouts = args.dropouts
epochs = args.epochs
flash = args.flash
fhr = args.fhr
fits = args.fits
nfit = args.nfits
noglm = args.noglm
layer = args.layers
model = args.model
neurons = args.neurons
rptdist = args.rptdist
savedmodel = args.savedmodel
train_test_split_time = args.splittime
suite = args.suite
twin = args.twin
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.info(args)
### saved model name ###
trained_models_dir = '/glade/work/ahijevyc/NSC_objects'
if savedmodel:
pass
else:
fhr_str = make_fhr_str(fhr) # abbreviate list of forecast hours with hyphens (where possible) so model name is not too long for tf.
savedmodel = f"{model}.{suite}.{flash}flash_{twin}hr.rpt_{rptdist}km_{twin}hr.{neurons[0]}n.ep{epochs}.{fhr_str}.bs{batchsize}.{layer}layer"
logging.info(f"savedmodel={savedmodel}")
##################################
#mask = pickle.load(open('/glade/u/home/sobash/2013RT/usamask.pk', 'rb'))
mask = pickle.load(open('./usamask.pk', 'rb'))
height, width = 65,93
mask = mask.reshape((height,width))
mask = xarray.DataArray(mask,dims=["y","x"])
if False:
ax = plt.axes(projection = G211.g211)
xs = G211.xs
ys = G211.ys
logging.info(f"Read {model} predictors. Use parquet file, if it exists. If it doesn't exist, create it.")
if model == "HRRR":
alsoHRRRv4 = False
ifile = f'/glade/work/ahijevyc/NSC_objects/{model}/HRRRX.32bit.par'
ifile = f'/glade/work/ahijevyc/NSC_objects/{model}/HRRRX.32bit.noN7.par'
if alsoHRRRv4: ifile = f'/glade/work/ahijevyc/NSC_objects/{model}/HRRRXHRRR.32bit.noN7.par'
scalingfile = f"/glade/work/ahijevyc/NSC_objects/{model}/scaling_values_all_HRRRX.pk"
elif model == "NSC3km-12sec":
ifile = f'{model}.par'
scalingfile = f"scaling_values_{model}.pk"
if os.path.exists(ifile):
logging.info(f'reading {ifile}')
df = pd.read_parquet(ifile, engine="pyarrow")
else:
# Define ifiles, a list of input files from glob.glob method
if model == "HRRR":
search_str = f'/glade/work/sobash/NSC_objects/HRRR_new/grid_data/grid_data_HRRRX_d01_20*00-0000.par' # just 00z
ifiles = glob.glob(search_str)
if alsoHRRRv4:
search_str = f'/glade/work/sobash/NSC_objects/HRRR_new/grid_data/grid_data_HRRR_d01_202[01]*00-0000.par' # HRRR, not HRRRX. no 2022 (yet)
ifiles.extend(glob.glob(search_str))
elif model == "NSC3km-12sec":
search_str = f'/glade/work/sobash/NSC_objects/grid_data/grid_data_{model}_d01_201*00-0000.par'
ifiles = glob.glob(search_str)
# remove larger neighborhood size (fields containing N7 in the name)
df = pd.read_parquet(ifiles[0], engine="pyarrow")
columns = df.columns
N7_columns = [x for x in df.columns if "-N7" in x]
if "noN7" in suite:
logging.debug(f"ignoring {len(N7_columns)} N7 columns: {N7_columns}")
columns = set(df.columns) - set(N7_columns)
# all columns including severe reports
logging.info(f"Reading {len(ifiles)} {model} files {search_str}")
df = pd.concat( pd.read_parquet(ifile, engine="pyarrow", columns=columns) for ifile in ifiles)
logging.info("done")
df["valid_time"] =
|
pd.to_datetime(df["Date"])
|
pandas.to_datetime
|
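The completed call parses the Date strings into datetime64 values; any further adjustment (such as adding the forecast hour) is outside this row. A minimal sketch with a hypothetical frame:

import pandas as pd

df = pd.DataFrame({"Date": ["2020-12-02 12:00", "2020-12-02 13:00"], "fhr": [1, 2]})
df["valid_time"] = pd.to_datetime(df["Date"])
print(df["valid_time"].dtype)  # datetime64[ns]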
import warnings
warnings.filterwarnings("ignore")
import os
import json
import argparse
import time
import datetime
import pickle
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from scipy.stats import spearmanr, mannwhitneyu
import scipy.cluster.hierarchy as shc
from skbio.stats.composition import clr
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from scipy.cluster.hierarchy import cut_tree
from src.models.MiMeNet import MiMeNet, tune_MiMeNet
###################################################
# Read in command line arguments
###################################################
parser = argparse.ArgumentParser(description='Perform MiMeNet')
parser.add_argument('-micro', '--micro', help='Comma delimited file representing matrix of samples by microbial features', required=True)
parser.add_argument('-metab', '--metab', help= 'Comma delimited file representing matrix of samples by metabolomic features', required=True)
parser.add_argument('-external_micro', '--external_micro', help='Comma delimited file representing matrix of samples by microbial features')
parser.add_argument('-external_metab', '--external_metab', help= 'Comma delimited file representing matrix of samples by metabolomic features')
parser.add_argument('-annotation', '--annotation', help='Comma delimited file annotating subset of metabolite features')
parser.add_argument('-labels', '--labels', help="Comma delimited file for sample labels to associate clusters with")
parser.add_argument('-output', '--output', help='Output directory', required=True)
parser.add_argument('-net_params', '--net_params', help='JSON file of network hyperparameters', default=None)
parser.add_argument('-background', '--background', help='Directory with previously generated background', default=None)
parser.add_argument('-num_background', '--num_background', help='Number of background CV Iterations', default=100, type=int)
parser.add_argument('-micro_norm', '--micro_norm', help='Microbiome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-metab_norm', '--metab_norm', help='Metabolome normalization (RA, CLR, or None)', default='CLR')
parser.add_argument('-threshold', '--threshold', help='Define significant correlation threshold', default=None)
parser.add_argument('-num_run_cv', '--num_run_cv', help='Number of iterations for cross-validation', default=1, type=int)
parser.add_argument('-num_cv', '--num_cv', help='Number of cross-validated folds', default=10, type=int)
parser.add_argument('-num_run', '--num_run', help='Number of iterations for training full model', type=int, default=10)
args = parser.parse_args()
micro = args.micro
metab = args.metab
external_micro = args.external_micro
external_metab = args.external_metab
annotation = args.annotation
out = args.output
net_params = args.net_params
threshold = args.threshold
micro_norm = args.micro_norm
metab_norm = args.metab_norm
num_run_cv = args.num_run_cv
num_cv = args.num_cv
num_run = args.num_run
background_dir = args.background
labels = args.labels
num_bg = args.num_background
tuned = False
gen_background = True
if background_dir != None:
gen_background = False
start_time = time.time()
if external_metab != None and external_micro == None:
print("Warning: External metabolites found with no external microbiome...ignoring external set!")
external_metab = None
if net_params != None:
print("Loading network parameters...")
try:
with open(net_params, "r") as infile:
params = json.load(infile)
num_layer = params["num_layer"]
layer_nodes = params["layer_nodes"]
l1 = params["l1"]
l2 = params["l2"]
dropout = params["dropout"]
learning_rate = params["lr"]
tuned = True
print("Loaded network parameters...")
except:
print("Warning: Could not load network parameter file!")
###################################################
# Load Data
###################################################
metab_df = pd.read_csv(metab, index_col=0)
micro_df = pd.read_csv(micro, index_col=0)
if external_metab != None:
external_metab_df = pd.read_csv(external_metab, index_col=0)
if external_micro != None:
external_micro_df = pd.read_csv(external_micro, index_col=0)
###################################################
# Filter only paired samples
###################################################
samples = np.intersect1d(metab_df.columns.values, micro_df.columns.values)
num_samples = len(samples)
metab_df = metab_df[samples]
micro_df = micro_df[samples]
for c in micro_df.columns:
micro_df[c] = pd.to_numeric(micro_df[c])
for c in metab_df.columns:
metab_df[c] =
|
pd.to_numeric(metab_df[c])
|
pandas.to_numeric
|
# This gets all the census data; it can be filtered by level and state.
# We should experiment with the chunk sizes to see how they affect speed. For now there is a message in censusreporter_api.py that will alert you if a chunk gets too big and it does a json_merge. json_merge is slow, so we want to avoid those.
import pandas as pd
from censusreporter_api import *
import os
from io import BytesIO
import io
from zipfile import ZipFile
import requests
import datetime
import re
import argparse
from bs4 import BeautifulSoup
def getTractInfo(url, regex=''):
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a', href=re.compile(regex))]
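# getTractInfo scrapes the directory listing at `url` and returns every href matching `regex`, re-joined onto the base URL; it is used below to walk the census tract map folders for each state.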
BASE_URL = "http://www2.census.gov/geo/docs/maps-data/data/gazetteer/"
YEAR = datetime.datetime.now().year
GAZ_YEAR_URL = '{}{}_Gazetteer/'.format(BASE_URL, YEAR)
# For easier Windows compatibility
OUTPUT_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))),
'dimensionaldata'
)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
STATE_LIST = [ 'AL','AK','AZ','AR','CA','CO','CT','DE','DC','FL','GA','HI','ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY','PR']
STATE_CODES = {'AL': '01','AK': '02','AZ': '04','AR': '05','CA': '06','CO': '08','CT': '09','DE': '10','DC': '11','FL': '12','GA': '13','HI': '15','ID': '16','IL': '17','IN': '18','IA': '19','KS': '20','KY': '21','LA': '22','ME': '23','MD': '24','MA': '25','MI': '26','MN': '27','MS': '28','MO': '29','MT': '30','NE': '31','NV': '32','NH': '33','NJ': '34','NM': '35','NY': '36','NC': '37','ND': '38','OH': '39','OK': '40','OR':'41','PA': '42','RI': '44','SC': '45','SD': '46','TN': '47','TX': '48','UT': '49','VT': '50','VA': '51','WA': '53','WV': '54','WI': '55','WY': '56','PR':'72'}
STATE_ABBREVS = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
DATA_TABLES = ['B01001','B03002','B06008','B23001','B19001','B25009','B25077']
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--states", help="State Abbreviation List, space seperated ie NY AK", nargs="*")
parser.add_argument("-t", "--type", help="ALL|County|Upper|Lower|Congress|City|State|Tract space separated", nargs="*")
def get_combinedData(thePD=None, tables=None):
geoids = thePD.index.tolist()
try:
dFrame = get_dataframe(geoids=geoids, tables=tables)
except Exception as e: #This should never happen, it's handled in censusreporter_api but just in case...
handledError = "release doesn't include GeoID(s) "
errorMsg = str(e)
print(errorMsg)
if handledError in errorMsg:
pattern = re.compile(r"^\s+|\s*,\s*|\s+$")
geoList = pattern.split(errorMsg.partition(handledError)[2].replace(".", ""))
thePD = thePD[-thePD.index.isin(geoList)]
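# Keep only the GeoIDs this release does include, then retry the API call with the remainder.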
#If everything was not valid, then we'll just return nothing
if len(thePD) == 0:
return None
return get_combinedData(thePD, tables)
else:
raise
else:
return dFrame
return None
def get_zip(file_url):
url = requests.get(file_url)
zipfile = ZipFile(BytesIO(url.content), 'r')
zip_names = zipfile.namelist()
if len(zip_names) == 1:
file_name = zip_names.pop()
extracted_file = zipfile.open(file_name).read()
return extracted_file
# Util for cleaning up column names of extra whitespace
def strip_colnames(df):
all_cols = df.columns.values.tolist()
col_dict = {}
for col in all_cols:
col_dict[col] = col.strip()
return df.rename(columns=col_dict)
# Gets voter_file_id from different jurisdiction types
def parse_voter_file_id(row):
if str(row['GEOID']).endswith('ZZ'):
return None
# If not ZZ, return letter for district (Alaska has lettered districts)
if not str(row['GEOID'])[-1:].isdigit():
return str(row['GEOID'])[-1:]
# Multiplier is 100 for congress, 1000 for all other types
if row['ENTITYTYPE'] == 'congress':
state_mult = 100
else:
state_mult = 1000
voter_file_id = int(row['GEOID']) - (int(row['STATEFP']) * state_mult)
# Some states with 1 district return 0, return 1 for those
if voter_file_id > 0:
return str(voter_file_id)
else:
return '1'
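# Illustrative (hypothetical GEOIDs): a congress row with GEOID 3601 and STATEFP 36 yields 3601 - 36*100 = 1,
# while a lower-house row with GEOID 36001 yields 36001 - 36*1000 = 1; a GEOID ending in 'ZZ' returns None.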
def get_census_data(geo_type, geo_url, state_list, fips_func,
state_idx=(0, 0),
census_tables=DATA_TABLES,
find_zz=False,
delim='\t',
chunk_size=250):
print("Working " + geo_type)
if requests.get(geo_url).status_code != 200:
raise ValueError("{} file not found at URL: {}".format(geo_type, geo_url))
# Changing source if city URL
if geo_type != 'City' and geo_type != "Tract":
csv_file = get_zip(geo_url)
file_source = io.StringIO(csv_file.decode('cp1252'))
else:
file_source = geo_url
reader = pd.read_csv(file_source,
delimiter=delim,
iterator=True,
chunksize=chunk_size)
context_df_list = []
census_df_list = []
for chunk in reader:
if geo_type == "Tract":
chunk.rename(columns={'CODE': 'GEOID'}, inplace=True)
chunk['USPS'] = state_list[0] #Tracts are passed in one state at a time, but don't have this field
else:
chunk = chunk.loc[chunk['USPS'].isin(state_list)]
if find_zz:
chunk['GEOID'] = chunk['GEOID'].astype(str)
chunk = chunk.loc[chunk['GEOID'].str.find('ZZ') == -1]
if len(chunk) > 0:
chunk['FIPS'] = chunk['GEOID'].apply(fips_func)
context_df_list.append(chunk)
chunk = chunk.set_index('FIPS')
data = get_combinedData(chunk, tables=census_tables)
census_df_list.append(data)
context_df = pd.concat(context_df_list)
census_df = pd.concat(census_df_list)
context_df['STATEFP'] = context_df['GEOID'].apply(
lambda x: str(x)[:state_idx[0]].zfill(state_idx[1])
)
context_df['ENTITYTYPE'] = geo_type.lower()
# Check if no census data returned, then just return context info
if len(census_df.columns.values.tolist()) == 0:
return strip_colnames(context_df.set_index('FIPS'))
census_df = census_df.rename(columns={'GEOID': 'FIPS'})
census_df = strip_colnames(census_df.set_index('FIPS'))
context_df = strip_colnames(context_df.set_index('FIPS'))
data = context_df.join(census_df)
return data
# State process is different enough to warrant its own function
def get_state(state_list, state_codes, census_tables=DATA_TABLES):
print("Starting State")
df = pd.DataFrame()
cTemp = [] #I know there is a better way, but this works for me
for state in state_list:
cTemp.append([state, state_codes[state]])
c = pd.DataFrame(cTemp, columns=['USPS', 'GEOID'])
c['FIPS'] = c['GEOID'].apply(lambda x: "04000US" + str(x).zfill(2))
c = strip_colnames(c.set_index('FIPS'))
data = get_combinedData(c, tables=census_tables)
print("data Size: " + str(len(data)))
df = pd.concat([df, data])
c['STATEFP'] = state_codes[state]
c['ENTITYTYPE'] = "state"
df = df.rename(columns={'GEOID': 'FIPS'})
df = strip_colnames(df.set_index('FIPS'))
data = c.join(df)
return data
if __name__ == '__main__':
args = parser.parse_args()
print("Writing to "+OUTPUT_DIR)
if args.states is None:
state_list = STATE_LIST
else:
state_list = [element.upper() for element in args.states]
if args.type is None:
types = 'ALL'
else:
types = [element.upper() for element in args.type]
for state in state_list:
if state not in STATE_CODES:
raise ValueError("Unknown state: " + state)
# Verify Gazetteer URL
while requests.get(GAZ_YEAR_URL).status_code != 200:
YEAR -= 1
GAZ_YEAR_URL = '{}{}_Gazetteer/'.format(BASE_URL, YEAR)
print(GAZ_YEAR_URL)
FILE_BASE_URL = GAZ_YEAR_URL + str(YEAR) + "_Gaz_"
output_df = pd.DataFrame()
if types == 'ALL' or "COUNTY" in types:
county_df = get_census_data(
'County',
FILE_BASE_URL + 'counties_national.zip',
state_list,
lambda x: "05000US{0:0=5d}".format(int(x)),
state_idx=(-3, 2)
)
county_df['VOTER_FILE_ID'] = county_df.apply(
parse_voter_file_id,
axis=1
)
output_df = pd.concat([output_df, county_df])
if types == 'ALL' or "CONGRESS" in types:
"""
Now we do congressional districts. These are numbered, so we need to guess
which one it is. We'll start with the year and subtract 1789 (first congress)
and divide by 2 (2 year terms), then we'll add 2 more since they don't really
end on the right year and we want to make sure we get it right. Then we'll
test the URL and keep removing 1 until we find it.
"""
congress = int((YEAR - 1789) / 2) + 2
conYearURL = FILE_BASE_URL + str(congress) + "CDs_national.zip"
while requests.get(conYearURL).status_code != 200:
if congress < 115: #Using 115 as when I wrote this code that was the current number, so I know that exists
raise ValueError("Crap, can't find congress file at: " + conYearURL)
congress -= 1
conYearURL = FILE_BASE_URL + str(congress) + "CDs_national.zip"
congress_df = get_census_data(
'Congress',
conYearURL,
state_list,
lambda x: "50000US" + str(x).zfill(4),
state_idx=(-2, 2)
)
congress_df['VOTER_FILE_ID'] = congress_df.apply(
parse_voter_file_id,
axis=1
)
congress_df['NAME'] = congress_df['VOTER_FILE_ID'].apply(
lambda x: 'Congressional District {}'.format(x) if x else None
)
output_df = pd.concat([output_df, congress_df])
if types == 'ALL' or "LOWER" in types:
state_house_df = get_census_data(
'Lower House',
FILE_BASE_URL + "sldl_national.zip",
state_list,
lambda x: "62000US" + str(x).zfill(5),
state_idx=(-3, 2),
find_zz=True
)
state_house_df['VOTER_FILE_ID'] = state_house_df.apply(
parse_voter_file_id,
axis=1
)
output_df = pd.concat([output_df, state_house_df])
if types == 'ALL' or "UPPER" in types:
upper_house_df = get_census_data(
'Upper House',
FILE_BASE_URL + "sldu_national.zip",
state_list,
lambda x: "61000US" + str(x).zfill(5),
state_idx=(-3, 2),
find_zz=True
)
upper_house_df['VOTER_FILE_ID'] = upper_house_df.apply(
parse_voter_file_id,
axis=1
)
output_df = pd.concat([output_df, upper_house_df])
# School Districts: high school pattern is: 96000US0400450,
# elementary school district pattern is: 95000US0400005
if types == 'ALL' or "CITY" in types:
city_base_url = GAZ_YEAR_URL + str(YEAR)
city_df_list = []
"""
Instead of building iteration in to the city function, iterate through,
supplying each base URL, and give each one a state list with only the state
pulled in the URL
"""
for state in state_list:
city_url = '{}_gaz_place_{}.txt'.format(city_base_url, STATE_CODES[state])
state_city_df = get_census_data(
'City',
city_url,
[state],
lambda x: "16000US" + str(x).zfill(7),
state_idx=(-5, 2)
)
city_df_list.append(state_city_df)
city_df = pd.concat(city_df_list)
output_df = pd.concat([output_df, city_df])
if types == 'ALL' or "STATE" in types:
state_df = get_state(state_list, STATE_CODES)
state_df['NAME'] = state_df['USPS'].apply(lambda x: STATE_ABBREVS[x])
output_df = pd.concat([output_df, state_df])
if types == 'ALL' or "TRACT" in types:
tracts_df_list = []
div_tract_df_list = []
temp_tract_df_list = []
loop = 0
for state in state_list:
div_tract_df_list = []  # start fresh for each state so its concat only covers its own divisions
tract_url = 'http://www2.census.gov/geo/maps/dc10map/tract/st{}_{}'.format(STATE_CODES[state], state.lower())
if state == 'PR':
tract_url = tract_url + "_1" #PR Just had to be different
print(tract_url)
for division in getTractInfo(tract_url, r"^[^#\.]+_"):
temp_tract_df_list = []  # start fresh for each division so only its own tracts are concatenated below
for tract_file in getTractInfo(division[:-1], r"\.txt$"): #Just in case there are more than one
print(tract_file)
if "SP_" not in tract_file: #Some have Spanish langage copies, we don't need that
temp_tract_df = get_census_data(
'Tract',
tract_file,
[state],
lambda x: "14000US" + str(x).zfill(11),
state_idx=(-9, 2),
delim=';',
chunk_size=200
)
temp_tract_df_list.append(temp_tract_df)
div_tract_df_list.append(pd.concat(temp_tract_df_list))
tracts_df_list.append(pd.concat(div_tract_df_list))
tract_df = pd.concat(tracts_df_list)
output_df =
|
pd.concat([output_df, tract_df])
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 4 16:22:57 2020
@author: Natalie
"""
import os
import sys
import click
import pickle
import pandas as pd
import numpy as np
import geopandas as gpd
import imageio
from shapely.geometry import Point
import json
from bokeh.io import output_file
from bokeh.plotting import figure, show
from bokeh.models import (BasicTicker, CDSView, ColorBar, ColumnDataSource,
CustomJS, CustomJSFilter, FactorRange,
GeoJSONDataSource, HoverTool, Legend,
LinearColorMapper, PrintfTickFormatter, Slider, Whisker)
from bokeh.layouts import row, column, gridplot, grid, widgetbox
from bokeh.models.widgets import Tabs, Panel
from bokeh.palettes import brewer
from bokeh.transform import transform, factor_cmap
import click # command-line interface
from yaml import load, dump, SafeLoader # pyyaml library for reading the parameters.yml file
from microsim.column_names import ColumnNames
# Functions for preprocessing
# ---------------------------
def calc_nr_days(data_file):
# figure out nr days by reading in e.g. retail dangers pickle file of run 0
pickle_in = open(data_file,"rb")
dangers = pickle.load(pickle_in)
pickle_in.close()
filter_col = [col for col in dangers if col.startswith(ColumnNames.LOCATION_DANGER)]
# don't use the column simply called 'Danger'
filter_col = filter_col[1:len(filter_col)]
nr_days = len(filter_col)
return nr_days
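# Typical call (hypothetical path and venue name): nr_days = calc_nr_days(os.path.join(data_dir, "0", "Retail.pickle"))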
def create_venue_dangers_dict(locations_dict,r_range,data_dir,start_day,end_day,start_run,nr_runs):
'''
Reads in venue pickle files (venues from locations_dict) and populates dangers_dict_3d (raw data: venue, day, run), dangers_dict (mean across runs) and dangers_dict_std (standard deviation across runs)
Possible output includes:
dangers_dict # mean (value to be plotted)
dangers_dict_std # standard deviation (could plot as error bars)
dangers_dict_3d # full 3D data (for debugging)
'''
dangers_dict = {}
dangers_dict_std = {}
dangers_dict_3d = {}
for key, value in locations_dict.items():
#for r in range(nr_runs):
for r in r_range:
data_file = os.path.join(data_dir, f"{r}",f"{locations_dict[key]}.pickle")
pickle_in = open(data_file,"rb")
dangers = pickle.load(pickle_in)
pickle_in.close()
filter_col = [col for col in dangers if col.startswith('Danger')]
# don't use the column simply called 'Danger'
filter_col = filter_col[1:len(filter_col)]
#nr_days = len(filter_col)
# # set row index to ID
# dangers.set_index('ID', inplace = True)
dangers_colnames = filter_col[start_day:end_day+1]
dangers_rownames = dangers.index
dangers_values = dangers[filter_col[start_day:end_day+1]]
if r == start_run:
dangers_3d = np.zeros((dangers.shape[0],dangers_values.shape[1],nr_runs))
dangers_3d[:,:,r-start_run] = dangers_values
dangers_dict_3d[key] = dangers_3d
dangers_dict[key] = pd.DataFrame(data=dangers_3d.mean(axis=2), index=dangers_rownames, columns=dangers_colnames)
dangers_dict_std[key] = pd.DataFrame(data=dangers_3d.std(axis=2), index=dangers_rownames, columns=dangers_colnames)
return dangers_dict, dangers_dict_std, dangers_dict_3d
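# Example use (hypothetical venue key and run range): with locations_dict = {'Retail': 'Retail'} and r_range = range(0, 3),
# dangers_dict['Retail'] is a venues-by-days DataFrame of mean danger across the 3 runs and
# dangers_dict_std['Retail'] holds the matching standard deviations.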
def create_difference_dict(dict_sc0,dict_sc1,lookup_dict):
dict_out = {}
for key, value in lookup_dict.items():
dict_out[key] = dict_sc1[key].subtract(dict_sc0[key])
return dict_out
def create_msoa_dangers_dict(dangers_dict,keys,msoa_codes):
'''
Converts dangers_dict to MSOA level data for the appropriate venue types. Produces average danger score (sum dangers in MSOA / total nr venues in MSOA)
Output: dangers_msoa_dict
'''
dangers_msoa_dict = {}
for k in range(0,len(keys)):
dangers = dangers_dict[keys[k]]
msoa_code = msoa_codes[k]
dangers['MSOA'] = msoa_code
# count nr for this condition per area
msoa_sum = dangers.groupby(['MSOA']).agg('sum')
msoa_count = dangers.groupby(['MSOA']).agg('count')
msoa_avg = msoa_sum.div(msoa_count, axis='index')
dangers_msoa_dict[keys[k]] = msoa_avg
return dangers_msoa_dict
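# For example, if one MSOA contains 4 venues with dangers 2, 0, 1 and 1 on a given day,
# the MSOA-level value for that day is (2 + 0 + 1 + 1) / 4 = 1.0.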
def create_counts_dict(conditions_dict,r_range,data_dir,start_day,end_day,start_run,nr_runs,age_cat):
'''
Counts per condition (3D, mean and standard deviation)
Produces 5 types of counts:
msoacounts: nr per msoa and day
agecounts: nr per age category and day
totalcounts: nr per day (across all areas)
cumcounts: nr per MSOA and day
uniquecounts: nr with 'final' disease status across the time period, e.g. someone who is presymptomatic, symptomatic and recovered is only counted once, as recovered
Output:
msoas # list of msoas
totalcounts_dict, cumcounts_dict, agecounts_dict, msoacounts_dict, cumcounts_dict_3d, totalcounts_dict_std, cumcounts_dict_std, agecounts_dict_std, msoacounts_dict_std, totalcounts_dict_3d, agecounts_dict_3d, msoacounts_dict_3d, uniquecounts_dict_3d, uniquecounts_dict_std, uniquecounts_dict
'''
# start with empty dictionaries
msoas = []
msoacounts_dict_3d = {}
totalcounts_dict_3d = {}
cumcounts_dict_3d = {}
agecounts_dict_3d = {}
uniquecounts_dict_3d = {}
msoacounts_dict = {}
agecounts_dict = {}
totalcounts_dict = {}
cumcounts_dict = {}
uniquecounts_dict = {}
msoacounts_dict_std = {}
agecounts_dict_std = {}
totalcounts_dict_std = {}
cumcounts_dict_std = {}
uniquecounts_dict_std = {}
nr_days = end_day - start_day + 1
dict_days = [] # empty list for column names 'Day0' etc
for d in range(start_day, end_day+1):
dict_days.append(f'Day{d}')
age_cat_str = []
for a in range(age_cat.shape[0]):
age_cat_str.append(f"{age_cat[a,0]}-{age_cat[a,1]}")
# first, create 3d dictionaries
for r in r_range:
# read in pickle file individuals (disease status)
data_file = os.path.join(data_dir, f"{r}", "Individuals.pickle")
pickle_in = open(data_file,"rb")
individuals_tmp = pickle.load(pickle_in)
pickle_in.close()
# if first ever run, keep copy and initialise 3D frame for aggregating
if r == start_run:
individuals = individuals_tmp.copy()
msoas.extend(sorted(individuals.area.unique())) # populate list of msoas (previously empty outside this function)
area_individuals = individuals['area'] # keep area per person to use later
# next bit of code is to restrict to user specified day range
# first, find all columns starting with disease_status
filter_col = [col for col in individuals if col.startswith('disease_status')]
# don't use the column simply called 'disease_status'
filter_col = filter_col[1:len(filter_col)]
counts_colnames = filter_col[start_day:end_day+1]
# User defined age brackets
individuals.insert(7, 'Age0', np.zeros((len(individuals),1)))
for a in range(age_cat.shape[0]):
individuals['Age0'] = np.where((individuals['age'] >= age_cat[a,0]) & (individuals['age'] <= age_cat[a,1]), a+1, individuals['Age0'])
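# Age0 now holds the 1-based index of the matching user-defined age bracket; ages outside every bracket keep the initial 0.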
age_cat_col = individuals['Age0'].values
# temporary workaround if no continuous age
#age_cat_col = individuals['Age1'].values
# add age brackets column to individuals_tmp
individuals_tmp.insert(7, 'Age0', age_cat_col)
uniquecounts_df = pd.DataFrame()
# select right columns
subset = individuals_tmp[counts_colnames]
for key, value in conditions_dict.items():
#print(key)
if r == start_run:
msoacounts_dict_3d[key] = np.zeros((len(msoas),nr_days,nr_runs))
cumcounts_dict_3d[key] = np.zeros((len(msoas),nr_days,nr_runs))
agecounts_dict_3d[key] = np.zeros((age_cat.shape[0],nr_days,nr_runs))
totalcounts_dict_3d[key] = np.zeros((nr_days,nr_runs))
uniquecounts_dict_3d[key] = np.zeros(nr_runs)
# find all rows with condition (dict value)
indices = subset[subset.eq(value).any(1)].index
# create new df of zeros and replace with 1 at indices
cumcounts_end = pd.DataFrame(np.zeros((subset.shape[0], 1)))
cumcounts_end.loc[indices] = 1
uniquecounts_df[key] = cumcounts_end.values[:,0]
# loop around days
msoacounts_run = np.zeros((len(msoas),nr_days))
cumcounts_run = np.zeros((len(msoas),nr_days))
agecounts_run = np.zeros((age_cat.shape[0],nr_days))
for day in range(0, nr_days):
#print(day)
# count nr for this condition per area
msoa_count_temp = individuals_tmp[subset.iloc[:,day] == conditions_dict[key]].groupby(['area']).agg({subset.columns[day]: ['count']})
if msoa_count_temp.shape[0] == len(msoas):
msoa_count_temp = msoa_count_temp.values
msoacounts_run[:,day] = msoa_count_temp[:, 0]
elif msoa_count_temp.empty == False:
#print('check MSOAs')
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(len(msoas)), columns = ['tmp'], index=msoas)
# drop multiindex to prevent warning msg
msoa_count_temp.columns = msoa_count_temp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, msoa_count_temp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
msoacounts_run[:,day] = tmp_df.iloc[:,1].values
# cumulative counts
# select right columns
tmp_cum = subset.iloc[:,0:day+1]
indices = tmp_cum[tmp_cum.eq(value).any(1)].index
# create new df of zeros and replace with 1 at indices
tmp_df = pd.DataFrame(np.zeros((tmp_cum.shape[0], 1)))
tmp_df.loc[indices] = 1
# merge with MSOA df
tmp_df = tmp_df.merge(area_individuals, left_index=True, right_index=True)
cumcounts_tmp = tmp_df.groupby(['area']).sum()
if cumcounts_tmp.shape[0] == len(msoas):
cumcounts_tmp = cumcounts_tmp.values
cumcounts_run[:,day] = cumcounts_tmp[:, 0]
elif cumcounts_tmp.empty == False:
#print('check MSOAs')
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(len(msoas)), columns = ['tmp'], index=msoas)
# drop multiindex to prevent warning msg
cumcounts_tmp.columns = cumcounts_tmp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, cumcounts_tmp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
cumcounts_run[:,day] = tmp_df.iloc[:,1].values
# count nr for this condition per age bracket
age_count_temp = individuals_tmp[subset.iloc[:,day] == conditions_dict[key]].groupby(['Age0']).agg({subset.columns[day]: ['count']})
if age_count_temp.shape[0] == age_cat.shape[0]:
age_count_temp = age_count_temp.values
agecounts_run[:,day] = age_count_temp[:, 0]
elif age_count_temp.empty == False:
# in case some entries don't exist
# start with empty dataframe
tmp_df = pd.DataFrame(np.zeros(age_cat.shape[0]), columns = ['tmp'], index=list(range(1,age_cat.shape[0]+1)))
# drop multilevel index to prevent warning msg
age_count_temp.columns = age_count_temp.columns.droplevel(0)
# merge with obtained counts - NaN will appear
tmp_df = pd.merge(tmp_df, age_count_temp, how='left', left_index=True,right_index=True)
# replace NaN by 0
tmp_df = tmp_df.fillna(0)
agecounts_run[:,day] = tmp_df.iloc[:,1].values
#age_count_temp.loc['2'].count
# get current values from dict
msoacounts = msoacounts_dict_3d[key]
cumcounts = cumcounts_dict_3d[key]
agecounts = agecounts_dict_3d[key]
totalcounts = totalcounts_dict_3d[key]
# add current run's values
msoacounts[:,:,r-start_run] = msoacounts_run
cumcounts[:,:,r-start_run] = cumcounts_run
agecounts[:,:,r-start_run] = agecounts_run
totalcounts[:,r-start_run] = msoacounts_run.sum(axis=0)
# write out to dict
msoacounts_dict_3d[key] = msoacounts
cumcounts_dict_3d[key] = cumcounts
agecounts_dict_3d[key] = agecounts
totalcounts_dict_3d[key] = totalcounts
uniquecounts_df[key] = uniquecounts_df[key]*(value+1)
uniquecounts_df['maxval'] = uniquecounts_df.max(axis = 1)
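# 'maxval' records, per individual, the highest encoded condition reached over the period, so each person is
# counted exactly once below, under their final disease status (e.g. recovered rather than symptomatic).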
for key, value in conditions_dict.items():
# get current values from dict
uniquecounts = uniquecounts_dict_3d[key]
# add current run's values
uniquecounts[r-start_run] = uniquecounts_df[uniquecounts_df.maxval == (value+1)].shape[0]
# write out to dict
uniquecounts_dict_3d[key] = uniquecounts
# next, create mean and std
for key, value in conditions_dict.items():
# get current values from dict
msoacounts = msoacounts_dict_3d[key]
cumcounts = cumcounts_dict_3d[key]
agecounts = agecounts_dict_3d[key]
totalcounts = totalcounts_dict_3d[key]
uniquecounts = uniquecounts_dict_3d[key]
# aggregate
msoacounts_std = msoacounts.std(axis=2)
msoacounts = msoacounts.mean(axis=2)
cumcounts_std = cumcounts.std(axis=2)
cumcounts = cumcounts.mean(axis=2)
agecounts_std = agecounts.std(axis=2)
agecounts = agecounts.mean(axis=2)
totalcounts_std = totalcounts.std(axis=1)
totalcounts = totalcounts.mean(axis=1)
uniquecounts_std = uniquecounts.std()
uniquecounts = uniquecounts.mean()
# write out to dict
msoacounts_dict[key] = pd.DataFrame(data=msoacounts, index=msoas, columns=dict_days)
msoacounts_dict_std[key] = pd.DataFrame(data=msoacounts_std, index=msoas, columns=dict_days)
cumcounts_dict[key] = pd.DataFrame(data=cumcounts, index=msoas, columns=dict_days)
cumcounts_dict_std[key] = pd.DataFrame(data=cumcounts_std, index=msoas, columns=dict_days)
agecounts_dict[key] =
|
pd.DataFrame(data=agecounts, index=age_cat_str, columns=dict_days)
|
pandas.DataFrame
|
import math
import multiprocessing
import time
from functools import lru_cache, partial
from multiprocessing import Pool
import pandas as pd
from numpy.random import shuffle
from retry.api import retry_call
from ..mongodb import get_db
from ..scripts.trading_calendar import is_trading_day
from ..setting.constants import MAX_WORKER
from ..utils import batch_loop, data_root, ensure_dtypes, make_logger
from ..utils.db_utils import to_dict
from ..websource.wy import fetch_cjmx
logger = make_logger('成交明细')
DATE_FMT = r'%Y-%m-%d'
def _last_5():
"""最近的5个交易日"""
db = get_db()
try:
return db['交易日历'].find_one()['last_month'][-5:]
except Exception:
today = pd.Timestamp('today').normalize()
dates = pd.date_range(today - pd.Timedelta(days=5), today)
return [d.to_pydatetime() for d in dates]
def _wy_fix_data(df):
dts = df.日期.dt.strftime(DATE_FMT) + ' ' + df.时间
df['成交时间'] = pd.to_da
|
tetime(dts)
|
pandas.to_datetime
|
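Here the completion splits mid-identifier: the prompt ends in pd.to_da and the completion supplies tetime(dts), so the assembled line is:
df['成交时间'] = pd.to_datetime(dts)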
from alpaca import Alpaca
from utils import to_time_series_dataset, split_df, TimeSeriesResampler, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.pipeline import Pipeline
import time
import numpy as np
import pandas as pd
# Variables
repetitions = 2
if __name__ == "__main__":
# For both datasets
for dataset in ['uc1']:
print("Dataset: ", dataset)
results = []
#timing = []
#outliers = []
if dataset == 'uc1':
X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
index_column='run_id',
feature_columns=['fldPosition', 'fldCurrent'],
target_name='target')
# Length of timeseries for resampler and cnn
sz = [38,41]
# Number of channels for cnn
num_channels = len(X[0][0])
# Number of classes for cnn
num_classes = np.unique(y).shape[0]
elif dataset == 'uc2':
X, y = split_df(
|
pd.read_pickle('..\\data\\df_uc2.pkl')
|
pandas.read_pickle
|
# -*- coding: utf-8 -*-
# Original Code by <NAME> for VOST Portugal
# 18 MAR 2022
# -----------------------------------------------
# LIBRARIES
# -----------------------------------------------
# Import Dash and Dash Bootstrap Components
import dash
import dash_bootstrap_components as dbc
from dash import Input, Output, dcc, html
# Import Core Libraries
import pandas as pd
import plotly.express as px
# -----------------------------------------------
# APP STARTS HERE
# -----------------------------------------------
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], title='CONFIRM - BAJATT 2022', update_title=None,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=0.7, minimum-scale=0.4"}],
)
server = app.server
CONFIRM_LOGO = app.get_asset_url('CONFIRM_Logotype.png')
color_map = {
"WARNING":"#C81D25",
"ACIDENTE":"#4F5D75",
"AVARIA MECÂNICA":"#DE6E4B",
"DESISTÊNCIA CONFIRMADA":"#2D3142",
"DESISTÊNCIA NÃO CONFIRMADA":"#242424"
}
app.layout = dbc.Container(
[
dbc.Row(
[
# AUTOMATIC UPDATER
dcc.Interval(
id='interval-component',
interval=20*1000, # in milliseconds
n_intervals=0
),
dbc.Col(
[
dbc.Row(
[
dbc.Row(html.Hr()),
dbc.Col(width=2,xs=12, sm=12,md=1,lg=1,xl=1),
dbc.Col(html.H3("BAJA TT 2022"),width=4,xs=12, sm=12,md=4,lg=4,xl=4),
dbc.Col(width=4,xs=12, sm=12,md=1,lg=4,xl=4),
dbc.Col(html.Img(src=CONFIRM_LOGO, height="37px"),width=2,xs=12, sm=12,md=1,lg=1,xl=1), # CONFIRM LOGO - DO NOT REMOVE
],
),
],
),
dbc.Row(
[
dbc.Col(width=2,xs=12, sm=12,md=1,lg=2,xl=1),
dbc.Col(
html.P("CONFIRM by VOST PORTUGAL ")
),
],
),
],
style={"height": "20%", "background-color": "#1D1E2C"},
),
dbc.Row(
[
dbc.Col(
dcc.Graph(id='map'), width=2,xs=12, sm=12,md=12,lg=12,xl=4,
),
dbc.Col(
dbc.Row(
[
dbc.Card(
[
dbc.CardHeader("TOTAL INCIDENTS", style={"background": "#FF495C","color":"white"}),
dbc.CardBody(
[
html.H6("TOTAL INCIDENTES", style={"color":"#FF495C"}, className="card-title"),
html.H4(id="totals"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("TOTAL WARNINGS", style={"background": "#C81D25","color":"white"}),
dbc.CardBody(
[
html.H6("RACE DIRECTOR", style={"color":"#C81D25"}, className="card-title"),
html.H4(id="total_warnings"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("BREAKDOWNS", style={"background": "#DE6E4B","color":"white"}),
dbc.CardBody(
[
html.H6("AVARIAS", style={"color":"#DE6E4B"}, className="card-title"),
html.H4(id="total_breakdowns"),
],
),
],
),
],
),
width=2,xs=12, sm=12,md=12,lg=6,xl=2,
),
dbc.Col(
dbc.Row(
[
dbc.Card(
[
dbc.CardHeader("ACCIDENTS", style={"background": "#4F5D75","color":"white"}),
dbc.CardBody(
[
html.H6("ACIDENTES", style={"color":"#4F5D75"}, className="card-title"),
html.H4(id="total_accidents"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("CONFIRMED OUT OF RACE", style={"background": "#2D3142","color":"white"}),
dbc.CardBody(
[
html.H6("DESISTÊNCIA", style={"color":"#2D3142"}, className="card-title"),
html.H4(id="total_gaveup_confirmed"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("NON-CONFIRMED OUT OF RACE", style={"background": "#242424","color":"white"}),
dbc.CardBody(
[
html.H6("DESISTÊNCIA NC", style={"color":"#242424"}, className="card-title"),
html.H4(id="total_gaveup_nconfirmed"),
],
),
],
),
],
),
width=2,xs=12, sm=12,md=12,lg=6,xl=2,
),
dbc.Col(
dbc.Row(dcc.Graph(id='pie')),
width=3,xs=12, sm=12,md=12,lg=12,xl=3,
),
],
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(dcc.Graph(id='timeline'))
],
),
],
style={"height": "10%", "background-color": "#242424"},
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(
[
dbc.Col(width=4,xs=12, sm=12,md=4,lg=4,xl=4),
dbc.Col(
dbc.Row(
[
dbc.Row(dbc.Col(width=12),),
dbc.Row(html.H6("POWERED BY VOST PORTUGAL",style={"align":"center"}),),
dbc.Row(html.H6("VOST PORTUGAL for ACP MOTORSPORTS",style={"align":"center"}),),
dbc.Row(html.H6("CC BY-NC-SA 2022",style={"align":"center"}),),
],
),
),
],
style={"height": "20%", "background-color": "#242424"},
),
],
),
],
style={"height": "30%", "background-color": "#242424"},
),
],
style={"width":"100vw","height": "97vh"},
)
# DEFINE CALL BACKS
@app.callback(
Output(component_id="map",component_property="figure"),
Output(component_id="totals",component_property="children"),
Output(component_id="total_warnings",component_property="children"), # returns variable
Output(component_id="total_breakdowns",component_property="children"),
Output(component_id="total_accidents",component_property="children"),
Output(component_id="total_gaveup_confirmed",component_property="children"), # returns variable
Output(component_id="total_gaveup_nconfirmed",component_property="children"), # returns table # returns table
Output(component_id="pie",component_property="figure"),
Output(component_id="timeline",component_property="figure"),
Input(component_id="interval-component", component_property="n_intervals"), # Triggers Call Back based on time update
)
# WHAT HAPPENS WHEN CALL BACK IS TRIGGERED
def confirmUpdate(value):
# DATA TREATMENT
df_ss1_cc = pd.read_csv('ss1_cc.csv')
df_live_incidents = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=812677681&single=true&output=csv')
df_live_cc = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=1268287201&single=true&output=csv')
df_live_warnings =
|
pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-<KEY>_/pub?gid=1026955157&single=true&output=csv')
|
pandas.read_csv
|
import glob
import os
import sys
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import logging
from ftplib import FTP
import urllib
import numpy as np
import pandas as pd
from zipfile import ZipFile
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'dis_017_storm_events_us' #check
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory.
Two types of files are being used: details and locations.
Using bulk download via FTP, link to NOAA's storm events database:
https://www.ncdc.noaa.gov/stormevents/ftp.jsp
'''
# connect to the FTP server and login anonymously
ftp = FTP('ftp.ncdc.noaa.gov', timeout = 30)
ftp.login()
# navigate to the correct directory and get a list of all filenames
ftp.cwd('/pub/data/swdi/stormevents/csvfiles/')
filenames = ftp.nlst()
# retrieve a sorted list of the details files
details_files = []
for filename in filenames:
if not filename.startswith('StormEvents_details-ftp_v1.0_d'):
continue
details_files.append(filename)
details_files.sort()
# retrieve a sorted list of the locations files
locations_files = []
for filename in filenames:
if not filename.startswith('StormEvents_locations-ftp_v1.0_d'):
continue
locations_files.append(filename)
locations_files.sort()
def ftp_download(file_dir):
'''
download data
INPUT file_dir: ftp location of file to download (string)
'''
for filename in file_dir:
with open(os.path.join(data_dir, filename), 'wb') as fo:
ftp.retrbinary("RETR " + filename, fo.write)
# download data from the source FTP
ftp_download(details_files)
ftp_download(locations_files)
'''
Process data
'''
#Concatenating details and locations files
raw_data_file = glob.glob(os.path.join(data_dir, "*.gz")) # advisable to use os.path.join as this makes concatenation OS independent
details_list = []
locations_list = []
# go through each file, turn it into a dataframe, and append that df to one of two lists, based on if it
# is a details file or a locations file
for file in raw_data_file:
if os.path.basename(file).startswith('StormEvents_details-ftp_v1.0_d'):
df = pd.read_csv(file)
details_list.append(df)
elif os.path.basename(file).startswith('StormEvents_locations-ftp_v1.0_d'):
df_1 = pd.read_csv(file)
locations_list.append(df_1)
else: print('error')
# concatenate tables for every year into one table for the details files and one table for the locations files
details_concatenated =
|
pd.concat(details_list, ignore_index=True)
|
pandas.concat
|